diff --git a/.travis.yml b/.travis.yml
index beeaae6277cbc480c1f7464c42cbf448c332ac0e..c914f5251f9ab27d7be14b3306774dc5c7d156f1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,6 +12,13 @@ before_install:
     - hash -r
     - which cmake
     - cmake --version
+    - wget https://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.xz
+    - tar -xvf nasm-2.13.03.tar.xz
+    - cd nasm-2.13.03
+    - ./configure
+    - make
+    - sudo make install
+    - nasm --version
 script:
     - |
         cargo build --verbose &&
diff --git a/Cargo.toml b/Cargo.toml
index 78736e99613ef4067b441541cde1bb02a075c9df..600f4846323798dd3680f0474b524b9e29094095 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,6 +27,9 @@ num-traits = "0.2"
 [build-dependencies]
 cmake = "0.1.32"
 
+[target.'cfg(target_arch = "x86_64")'.build-dependencies]
+nasm-rs = { git = "https://github.com/tdaede/nasm-rs.git" }
+
 [target.'cfg(unix)'.build-dependencies]
 pkg-config = "0.3.12"
 bindgen = { version = "0.37", optional = true }
diff --git a/appveyor.yml b/appveyor.yml
index b10bba12622f18c209e426f4e5d3687a9a74682b..c6788c6855b62bc9dee88c8de6a802b40e8cb8ef 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -6,8 +6,11 @@ environment:
       target: x86_64-pc-windows-msvc
 
 install:
+    - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat"
     - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
     - appveyor DownloadFile http://www.tortall.net/projects/yasm/releases/yasm-1.3.0-win64.exe -FileName yasm.exe
+    - appveyor DownloadFile https://www.nasm.us/pub/nasm/releasebuilds/2.13.03/win64/nasm-2.13.03-win64.zip -FileName nasm.zip
+    - 7z e -y nasm.zip
     - rustup-init -yv --default-toolchain %channel% --default-host %target%
     - set PATH=%PATH%;%USERPROFILE%\.cargo\bin;%APPVEYOR_BUILD_FOLDER%
     - rustc -vV
diff --git a/build.rs b/build.rs
index 0a45976ae6eadf1d80e88134b1da79b0998eb24b..cc6a321c8f4f5b807c22587d716844309498de77 100644
--- a/build.rs
+++ b/build.rs
@@ -6,12 +6,32 @@ extern crate pkg_config;
 #[cfg(unix)]
 #[cfg(feature = "decode_test")]
 extern crate bindgen;
+#[cfg(target_arch = "x86_64")]
+extern crate nasm_rs;
 
 use std::env;
 use std::fs;
+use std::fs::File;
+use std::io::Write;
 use std::path::Path;
 
 fn main() {
+    if cfg!(target_arch = "x86_64") {
+        let out_dir = env::var("OUT_DIR").unwrap();
+        {
+            let dest_path = Path::new(&out_dir).join("config.asm");
+            let mut config_file = File::create(dest_path).unwrap();
+            config_file.write_all(b"%define ARCH_X86_32 0\n").unwrap();
+            config_file.write_all(b"%define ARCH_X86_64 1\n").unwrap();
+            config_file.write_all(b"%define PIC 1\n").unwrap();
+            config_file.write_all(b"%define STACK_ALIGNMENT 32\n").unwrap();
+        }
+        let mut config_include_arg = String::from("-I");
+        config_include_arg.push_str(&out_dir);
+        config_include_arg.push('/');
+        nasm_rs::compile_library_args("rav1easm", &["src/x86/mc.asm"], &[&config_include_arg, "-Isrc/"]);
+    }
+
     if cfg!(windows) && cfg!(feature = "decode_test") {
         panic!("Unsupported feature on this platform!");
     }
diff --git a/src/ext/x86/x86inc.asm b/src/ext/x86/x86inc.asm
new file mode 100644
index 0000000000000000000000000000000000000000..20b7b9d9adc095931abc7e4dc05853634baed107
--- /dev/null
+++ b/src/ext/x86/x86inc.asm
@@ -0,0 +1,1729 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2018 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Henrik Gramner <henrik@gramner.com>
+;*          Anton Mitrofanov <BugMaster@narod.ru>
+;*          Fiona Glaser <fiona@x264.com>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible.  Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well.  Send patches or ideas
+; to x264-devel@videolan.org .
+
+%ifndef private_prefix
+    %define private_prefix dav1d
+%endif
+
+%ifndef public_prefix
+    %define public_prefix private_prefix
+%endif
+
+%ifndef STACK_ALIGNMENT
+    %if ARCH_X86_64
+        %define STACK_ALIGNMENT 16
+    %else
+        %define STACK_ALIGNMENT 4
+    %endif
+%endif
+
+%define WIN64  0
+%define UNIX64 0
+%if ARCH_X86_64
+    %ifidn __OUTPUT_FORMAT__,win32
+        %define WIN64  1
+    %elifidn __OUTPUT_FORMAT__,win64
+        %define WIN64  1
+    %elifidn __OUTPUT_FORMAT__,x64
+        %define WIN64  1
+    %else
+        %define UNIX64 1
+    %endif
+%endif
+
+%define FORMAT_ELF 0
+%ifidn __OUTPUT_FORMAT__,elf
+    %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf32
+    %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf64
+    %define FORMAT_ELF 1
+%endif
+
+%ifdef PREFIX
+    %define mangle(x) _ %+ x
+%else
+    %define mangle(x) x
+%endif
+
+%macro SECTION_RODATA 0-1 16
+    %ifidn __OUTPUT_FORMAT__,win32
+        SECTION .rdata align=%1
+    %elif WIN64
+        SECTION .rdata align=%1
+    %else
+        SECTION .rodata align=%1
+    %endif
+%endmacro
+
+%if WIN64
+    %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+    %undef PIC
+%endif
+%ifdef PIC
+    default rel
+%endif
+
+%ifdef __NASM_VER__
+    %use smartalign
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = (optional) stack size to be allocated. The stack will be aligned before
+;      allocating the specified stack size. If the required stack alignment is
+;      larger than the known stack alignment the stack will be manually aligned
+;      and an extra register will be allocated to hold the original stack
+;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
+;      register as stack pointer, request a negative stack size.
+; %4+/%5+ = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,7,0x40, dst, src, tmp
+; declares a function (foo) that automatically loads two arguments (dst and
+; src) into registers, uses one additional register (tmp) plus 7 vector
+; registers (m0-m6) and allocates 0x40 bytes of stack space.
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Use this instead of RET if it's a branch target.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNh is the high 8 bits of the word size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
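+; e.g. with the UNIX64 mapping defined below, r0 is rdi, r0d is edi, r0w is di
+; and r0b is dil, while r0m refers to wherever argument 0 was originally passed.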
+
+%macro DECLARE_REG 2-3
+    %define r%1q %2
+    %define r%1d %2d
+    %define r%1w %2w
+    %define r%1b %2b
+    %define r%1h %2h
+    %define %2q %2
+    %if %0 == 2
+        %define r%1m  %2d
+        %define r%1mp %2
+    %elif ARCH_X86_64 ; memory
+        %define r%1m [rstk + stack_offset + %3]
+        %define r%1mp qword r %+ %1 %+ m
+    %else
+        %define r%1m [rstk + stack_offset + %3]
+        %define r%1mp dword r %+ %1 %+ m
+    %endif
+    %define r%1  %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 3
+    %define r%1q r%1
+    %define e%1q r%1
+    %define r%1d e%1
+    %define e%1d e%1
+    %define r%1w %1
+    %define e%1w %1
+    %define r%1h %3
+    %define e%1h %3
+    %define r%1b %2
+    %define e%1b %2
+    %if ARCH_X86_64 == 0
+        %define r%1 e%1
+    %endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al, ah
+DECLARE_REG_SIZE bx, bl, bh
+DECLARE_REG_SIZE cx, cl, ch
+DECLARE_REG_SIZE dx, dl, dh
+DECLARE_REG_SIZE si, sil, null
+DECLARE_REG_SIZE di, dil, null
+DECLARE_REG_SIZE bp, bpl, null
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+    %assign %%i 0
+    %rep %0
+        CAT_XDEFINE t, %%i, r%1
+        %assign %%i %%i+1
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+    %rep %0
+        %define t%1q t%1 %+ q
+        %define t%1d t%1 %+ d
+        %define t%1w t%1 %+ w
+        %define t%1h t%1 %+ h
+        %define t%1b t%1 %+ b
+        %rotate 1
+    %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+    %define gprsize 8
+%else
+    %define gprsize 4
+%endif
+
+%macro PUSH 1
+    push %1
+    %ifidn rstk, rsp
+        %assign stack_offset stack_offset+gprsize
+    %endif
+%endmacro
+
+%macro POP 1
+    pop %1
+    %ifidn rstk, rsp
+        %assign stack_offset stack_offset-gprsize
+    %endif
+%endmacro
+
+%macro PUSH_IF_USED 1-*
+    %rep %0
+        %if %1 < regs_used
+            PUSH r%1
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro POP_IF_USED 1-*
+    %rep %0
+        %if %1 < regs_used
+            pop r%1
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-*
+    %rep %0
+        %if %1 < num_args
+            mov r%1, r %+ %1 %+ mp
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro SUB 2
+    sub %1, %2
+    %ifidn %1, rstk
+        %assign stack_offset stack_offset+(%2)
+    %endif
+%endmacro
+
+%macro ADD 2
+    add %1, %2
+    %ifidn %1, rstk
+        %assign stack_offset stack_offset-(%2)
+    %endif
+%endmacro
+
+%macro movifnidn 2
+    %ifnidn %1, %2
+        mov %1, %2
+    %endif
+%endmacro
+
+%macro movsxdifnidn 2
+    %ifnidn %1, %2
+        movsxd %1, %2
+    %endif
+%endmacro
+
+%macro ASSERT 1
+    %if (%1) == 0
+        %error assertion ``%1'' failed
+    %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+    %ifdef n_arg_names
+        %assign %%i 0
+        %rep n_arg_names
+            CAT_UNDEF arg_name %+ %%i, q
+            CAT_UNDEF arg_name %+ %%i, d
+            CAT_UNDEF arg_name %+ %%i, w
+            CAT_UNDEF arg_name %+ %%i, h
+            CAT_UNDEF arg_name %+ %%i, b
+            CAT_UNDEF arg_name %+ %%i, m
+            CAT_UNDEF arg_name %+ %%i, mp
+            CAT_UNDEF arg_name, %%i
+            %assign %%i %%i+1
+        %endrep
+    %endif
+
+    %xdefine %%stack_offset stack_offset
+    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+    %assign %%i 0
+    %rep %0
+        %xdefine %1q r %+ %%i %+ q
+        %xdefine %1d r %+ %%i %+ d
+        %xdefine %1w r %+ %%i %+ w
+        %xdefine %1h r %+ %%i %+ h
+        %xdefine %1b r %+ %%i %+ b
+        %xdefine %1m r %+ %%i %+ m
+        %xdefine %1mp r %+ %%i %+ mp
+        CAT_XDEFINE arg_name, %%i, %1
+        %assign %%i %%i+1
+        %rotate 1
+    %endrep
+    %xdefine stack_offset %%stack_offset
+    %assign n_arg_names %0
+%endmacro
+
+%define required_stack_alignment ((mmsize + 15) & ~15)
+%define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
+%define high_mm_regs (16*cpuflag(avx512))
+
+%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
+    %ifnum %1
+        %if %1 != 0
+            %assign %%pad 0
+            %assign stack_size %1
+            %if stack_size < 0
+                %assign stack_size -stack_size
+            %endif
+            %if WIN64
+                %assign %%pad %%pad + 32 ; shadow space
+                %if mmsize != 8
+                    %assign xmm_regs_used %2
+                    %if xmm_regs_used > 8
+                        %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
+                    %endif
+                %endif
+            %endif
+            %if required_stack_alignment <= STACK_ALIGNMENT
+                ; maintain the current stack alignment
+                %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+                SUB rsp, stack_size_padded
+            %else
+                %assign %%reg_num (regs_used - 1)
+                %xdefine rstk r %+ %%reg_num
+                ; align stack, and save original stack location directly above
+                ; it, i.e. in [rsp+stack_size_padded], so we can restore the
+                ; stack in a single instruction (i.e. mov rsp, rstk or mov
+                ; rsp, [rsp+stack_size_padded])
+                %if %1 < 0 ; need to store rsp on stack
+                    %xdefine rstkm [rsp + stack_size + %%pad]
+                    %assign %%pad %%pad + gprsize
+                %else ; can keep rsp in rstk during whole function
+                    %xdefine rstkm rstk
+                %endif
+                %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+                mov rstk, rsp
+                and rsp, ~(required_stack_alignment-1)
+                sub rsp, stack_size_padded
+                movifnidn rstkm, rstk
+            %endif
+            WIN64_PUSH_XMM
+        %endif
+    %endif
+%endmacro
+
+%macro SETUP_STACK_POINTER 1
+    %ifnum %1
+        %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
+            %if %1 > 0
+                ; Reserve an additional register for storing the original stack pointer, but avoid using
+                ; eax/rax for this purpose since it can potentially get overwritten as a return value.
+                %assign regs_used (regs_used + 1)
+                %if ARCH_X86_64 && regs_used == 7
+                    %assign regs_used 8
+                %elif ARCH_X86_64 == 0 && regs_used == 1
+                    %assign regs_used 2
+                %endif
+            %endif
+            %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
+                ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
+                ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
+                %assign regs_used 5 + UNIX64 * 3
+            %endif
+        %endif
+    %endif
+%endmacro
+
+%macro DEFINE_ARGS_INTERNAL 3+
+    %ifnum %2
+        DEFINE_ARGS %3
+    %elif %1 == 4
+        DEFINE_ARGS %2
+    %elif %1 > 4
+        DEFINE_ARGS %2, %3
+    %endif
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0,  rcx
+DECLARE_REG 1,  rdx
+DECLARE_REG 2,  R8
+DECLARE_REG 3,  R9
+DECLARE_REG 4,  R10, 40
+DECLARE_REG 5,  R11, 48
+DECLARE_REG 6,  rax, 56
+DECLARE_REG 7,  rdi, 64
+DECLARE_REG 8,  rsi, 72
+DECLARE_REG 9,  rbx, 80
+DECLARE_REG 10, rbp, 88
+DECLARE_REG 11, R14, 96
+DECLARE_REG 12, R15, 104
+DECLARE_REG 13, R12, 112
+DECLARE_REG 14, R13, 120
+
+%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+    %assign num_args %1
+    %assign regs_used %2
+    ASSERT regs_used >= num_args
+    SETUP_STACK_POINTER %4
+    ASSERT regs_used <= 15
+    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
+    ALLOC_STACK %4, %3
+    %if mmsize != 8 && stack_size == 0
+        WIN64_SPILL_XMM %3
+    %endif
+    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+    DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%macro WIN64_PUSH_XMM 0
+    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
+    %if xmm_regs_used > 6 + high_mm_regs
+        movaps [rstk + stack_offset +  8], xmm6
+    %endif
+    %if xmm_regs_used > 7 + high_mm_regs
+        movaps [rstk + stack_offset + 24], xmm7
+    %endif
+    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
+    %if %%xmm_regs_on_stack > 0
+        %assign %%i 8
+        %rep %%xmm_regs_on_stack
+            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
+            %assign %%i %%i+1
+        %endrep
+    %endif
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+    %assign xmm_regs_used %1
+    ASSERT xmm_regs_used <= 16 + high_mm_regs
+    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
+    %if %%xmm_regs_on_stack > 0
+        ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+        %assign %%pad %%xmm_regs_on_stack*16 + 32
+        %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+        SUB rsp, stack_size_padded
+    %endif
+    WIN64_PUSH_XMM
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 0
+    %assign %%pad_size 0
+    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
+    %if %%xmm_regs_on_stack > 0
+        %assign %%i xmm_regs_used - high_mm_regs
+        %rep %%xmm_regs_on_stack
+            %assign %%i %%i-1
+            movaps xmm %+ %%i, [rsp + (%%i-8)*16 + stack_size + 32]
+        %endrep
+    %endif
+    %if stack_size_padded > 0
+        %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
+            mov rsp, rstkm
+        %else
+            add rsp, stack_size_padded
+            %assign %%pad_size stack_size_padded
+        %endif
+    %endif
+    %if xmm_regs_used > 7 + high_mm_regs
+        movaps xmm7, [rsp + stack_offset - %%pad_size + 24]
+    %endif
+    %if xmm_regs_used > 6 + high_mm_regs
+        movaps xmm6, [rsp + stack_offset - %%pad_size +  8]
+    %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 0
+    WIN64_RESTORE_XMM_INTERNAL
+    %assign stack_offset (stack_offset-stack_size_padded)
+    %assign stack_size_padded 0
+    %assign xmm_regs_used 0
+%endmacro
+
+%define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs
+
+%macro RET 0
+    WIN64_RESTORE_XMM_INTERNAL
+    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+    %if vzeroupper_required
+        vzeroupper
+    %endif
+    AUTO_REP_RET
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0,  rdi
+DECLARE_REG 1,  rsi
+DECLARE_REG 2,  rdx
+DECLARE_REG 3,  rcx
+DECLARE_REG 4,  R8
+DECLARE_REG 5,  R9
+DECLARE_REG 6,  rax, 8
+DECLARE_REG 7,  R10, 16
+DECLARE_REG 8,  R11, 24
+DECLARE_REG 9,  rbx, 32
+DECLARE_REG 10, rbp, 40
+DECLARE_REG 11, R14, 48
+DECLARE_REG 12, R15, 56
+DECLARE_REG 13, R12, 64
+DECLARE_REG 14, R13, 72
+
+%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+    %assign num_args %1
+    %assign regs_used %2
+    %assign xmm_regs_used %3
+    ASSERT regs_used >= num_args
+    SETUP_STACK_POINTER %4
+    ASSERT regs_used <= 15
+    PUSH_IF_USED 9, 10, 11, 12, 13, 14
+    ALLOC_STACK %4
+    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+    DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required
+
+%macro RET 0
+    %if stack_size_padded > 0
+        %if required_stack_alignment > STACK_ALIGNMENT
+            mov rsp, rstkm
+        %else
+            add rsp, stack_size_padded
+        %endif
+    %endif
+    POP_IF_USED 14, 13, 12, 11, 10, 9
+    %if vzeroupper_required
+        vzeroupper
+    %endif
+    AUTO_REP_RET
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, 4
+DECLARE_REG 1, ecx, 8
+DECLARE_REG 2, edx, 12
+DECLARE_REG 3, ebx, 16
+DECLARE_REG 4, esi, 20
+DECLARE_REG 5, edi, 24
+DECLARE_REG 6, ebp, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-*
+    %rep %0
+        %define r%1m [rstk + stack_offset + 4*%1 + 4]
+        %define r%1mp dword r%1m
+        %rotate 1
+    %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+    %assign num_args %1
+    %assign regs_used %2
+    ASSERT regs_used >= num_args
+    %if num_args > 7
+        %assign num_args 7
+    %endif
+    %if regs_used > 7
+        %assign regs_used 7
+    %endif
+    SETUP_STACK_POINTER %4
+    ASSERT regs_used <= 7
+    PUSH_IF_USED 3, 4, 5, 6
+    ALLOC_STACK %4
+    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
+    DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required
+
+%macro RET 0
+    %if stack_size_padded > 0
+        %if required_stack_alignment > STACK_ALIGNMENT
+            mov rsp, rstkm
+        %else
+            add rsp, stack_size_padded
+        %endif
+    %endif
+    POP_IF_USED 6, 5, 4, 3
+    %if vzeroupper_required
+        vzeroupper
+    %endif
+    AUTO_REP_RET
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+    %macro WIN64_SPILL_XMM 1
+    %endmacro
+    %macro WIN64_RESTORE_XMM 0
+    %endmacro
+    %macro WIN64_PUSH_XMM 0
+    %endmacro
+%endif
+
+; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
+; a branch or a branch target. So switch to a 2-byte form of ret in that case.
+; We can automatically detect "follows a branch", but not a branch target.
+; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
+%macro REP_RET 0
+    %if has_epilogue || cpuflag(ssse3)
+        RET
+    %else
+        rep ret
+    %endif
+    annotate_function_size
+%endmacro
+
+%define last_branch_adr $$
+%macro AUTO_REP_RET 0
+    %if notcpuflag(ssse3)
+        times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
+    %endif
+    ret
+    annotate_function_size
+%endmacro
+
+%macro BRANCH_INSTR 0-*
+    %rep %0
+        %macro %1 1-2 %1
+            %2 %1
+            %if notcpuflag(ssse3)
+                %%branch_instr equ $
+                %xdefine last_branch_adr %%branch_instr
+            %endif
+        %endmacro
+        %rotate 1
+    %endrep
+%endmacro
+
+BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
+
+%macro TAIL_CALL 2 ; callee, is_nonadjacent
+    %if has_epilogue
+        call %1
+        RET
+    %elif %2
+        jmp %1
+    %endif
+    annotate_function_size
+%endmacro
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
+; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
+; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
+%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
+    cglobal_internal 1, %1 %+ SUFFIX, %2
+%endmacro
+%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
+    cglobal_internal 0, %1 %+ SUFFIX, %2
+%endmacro
+%macro cglobal_internal 2-3+
+    annotate_function_size
+    %if %1
+        %xdefine %%FUNCTION_PREFIX private_prefix
+        %xdefine %%VISIBILITY hidden
+    %else
+        %xdefine %%FUNCTION_PREFIX public_prefix
+        %xdefine %%VISIBILITY
+    %endif
+    %ifndef cglobaled_%2
+        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
+        %xdefine %2.skip_prologue %2 %+ .skip_prologue
+        CAT_XDEFINE cglobaled_, %2, 1
+    %endif
+    %xdefine current_function %2
+    %xdefine current_function_section __SECT__
+    %if FORMAT_ELF
+        global %2:function %%VISIBILITY
+    %else
+        global %2
+    %endif
+    align function_align
+    %2:
+    RESET_MM_PERMUTATION        ; needed for x86-64, also makes disassembly somewhat nicer
+    %xdefine rstk rsp           ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
+    %assign stack_offset 0      ; stack pointer offset relative to the return address
+    %assign stack_size 0        ; amount of stack space that can be freely used inside a function
+    %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
+    %assign xmm_regs_used 0     ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 and vzeroupper
+    %ifnidn %3, ""
+        PROLOGUE %3
+    %endif
+%endmacro
+
+; Create a global symbol from a local label with the correct name mangling and type
+%macro cglobal_label 1
+    %if FORMAT_ELF
+        global current_function %+ %1:function hidden
+    %else
+        global current_function %+ %1
+    %endif
+    %1:
+%endmacro
+
+%macro cextern 1
+    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+    CAT_XDEFINE cglobaled_, %1, 1
+    extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+    %ifdef PREFIX
+        %xdefine %1 mangle(%1)
+    %endif
+    CAT_XDEFINE cglobaled_, %1, 1
+    extern %1
+%endmacro
+
+%macro const 1-2+
+    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+    %if FORMAT_ELF
+        global %1:data hidden
+    %else
+        global %1
+    %endif
+    %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
+%if FORMAT_ELF
+    [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
+%endif
+
+; Tell debuggers how large the function was.
+; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
+; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
+; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
+; then its size might be unspecified.
+%macro annotate_function_size 0
+    %ifdef __YASM_VER__
+        %ifdef current_function
+            %if FORMAT_ELF
+                current_function_section
+                %%ecf equ $
+                size current_function %%ecf - current_function
+                __SECT__
+            %endif
+        %endif
+    %endif
+%endmacro
+
+; cpuflags
+
+%assign cpuflags_mmx      (1<<0)
+%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2     (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_lzcnt    (1<<7) | cpuflags_sse2
+%assign cpuflags_sse3     (1<<8) | cpuflags_sse2
+%assign cpuflags_ssse3    (1<<9) | cpuflags_sse3
+%assign cpuflags_sse4     (1<<10)| cpuflags_ssse3
+%assign cpuflags_sse42    (1<<11)| cpuflags_sse4
+%assign cpuflags_aesni    (1<<12)| cpuflags_sse42
+%assign cpuflags_avx      (1<<13)| cpuflags_sse42
+%assign cpuflags_xop      (1<<14)| cpuflags_avx
+%assign cpuflags_fma4     (1<<15)| cpuflags_avx
+%assign cpuflags_fma3     (1<<16)| cpuflags_avx
+%assign cpuflags_bmi1     (1<<17)| cpuflags_avx|cpuflags_lzcnt
+%assign cpuflags_bmi2     (1<<18)| cpuflags_bmi1
+%assign cpuflags_avx2     (1<<19)| cpuflags_fma3|cpuflags_bmi2
+%assign cpuflags_avx512   (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
+
+%assign cpuflags_cache32  (1<<21)
+%assign cpuflags_cache64  (1<<22)
+%assign cpuflags_aligned  (1<<23) ; not a cpu feature, but a function variant
+%assign cpuflags_atom     (1<<24)
+
+; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
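+; ((cpuflags & cpuflags_x) ^ cpuflags_x) is zero exactly when every bit of
+; cpuflags_x is set in cpuflags; the -1 / >>31 / &1 sequence turns that into 0 or 1.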
+%define    cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
+%define notcpuflag(x) (cpuflag(x) ^ 1)
+
+; Takes an arbitrary number of cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
+; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
+%macro INIT_CPUFLAGS 0-*
+    %xdefine SUFFIX
+    %undef cpuname
+    %assign cpuflags 0
+
+    %if %0 >= 1
+        %rep %0
+            %ifdef cpuname
+                %xdefine cpuname cpuname %+ _%1
+            %else
+                %xdefine cpuname %1
+            %endif
+            %assign cpuflags cpuflags | cpuflags_%1
+            %rotate 1
+        %endrep
+        %xdefine SUFFIX _ %+ cpuname
+
+        %if cpuflag(avx)
+            %assign avx_enabled 1
+        %endif
+        %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
+            %define mova movaps
+            %define movu movups
+            %define movnta movntps
+        %endif
+        %if cpuflag(aligned)
+            %define movu mova
+        %elif cpuflag(sse3) && notcpuflag(ssse3)
+            %define movu lddqu
+        %endif
+    %endif
+
+    %if ARCH_X86_64 || cpuflag(sse2)
+        %ifdef __NASM_VER__
+            ALIGNMODE p6
+        %else
+            CPU amdnop
+        %endif
+    %else
+        %ifdef __NASM_VER__
+            ALIGNMODE nop
+        %else
+            CPU basicnop
+        %endif
+    %endif
+%endmacro
+
+; Merge mmx, sse*, and avx*
+; m# is a simd register of the currently selected size
+; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
+; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
+; zm# is the corresponding zmm register if mmsize >= 64, otherwise the same as m#
+; (All 4 remain in sync through SWAP.)
+
+%macro CAT_XDEFINE 3
+    %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+    %undef %1%2
+%endmacro
+
+%macro DEFINE_MMREGS 1 ; mmtype
+    %assign %%prev_mmregs 0
+    %ifdef num_mmregs
+        %assign %%prev_mmregs num_mmregs
+    %endif
+
+    %assign num_mmregs 8
+    %if ARCH_X86_64 && mmsize >= 16
+        %assign num_mmregs 16
+        %if cpuflag(avx512) || mmsize == 64
+            %assign num_mmregs 32
+        %endif
+    %endif
+
+    %assign %%i 0
+    %rep num_mmregs
+        CAT_XDEFINE m, %%i, %1 %+ %%i
+        CAT_XDEFINE nn%1, %%i, %%i
+        %assign %%i %%i+1
+    %endrep
+    %if %%prev_mmregs > num_mmregs
+        %rep %%prev_mmregs - num_mmregs
+            CAT_UNDEF m, %%i
+            CAT_UNDEF nn %+ mmtype, %%i
+            %assign %%i %%i+1
+        %endrep
+    %endif
+    %xdefine mmtype %1
+%endmacro
+
+; Prefer registers 16-31 over 0-15 to avoid having to use vzeroupper
+%macro AVX512_MM_PERMUTATION 0-1 0 ; start_reg
+    %if ARCH_X86_64 && cpuflag(avx512)
+        %assign %%i %1
+        %rep 16-%1
+            %assign %%i_high %%i+16
+            SWAP %%i, %%i_high
+            %assign %%i %%i+1
+        %endrep
+    %endif
+%endmacro
+
+%macro INIT_MMX 0-1+
+    %assign avx_enabled 0
+    %define RESET_MM_PERMUTATION INIT_MMX %1
+    %define mmsize 8
+    %define mova movq
+    %define movu movq
+    %define movh movd
+    %define movnta movntq
+    INIT_CPUFLAGS %1
+    DEFINE_MMREGS mm
+%endmacro
+
+%macro INIT_XMM 0-1+
+    %assign avx_enabled 0
+    %define RESET_MM_PERMUTATION INIT_XMM %1
+    %define mmsize 16
+    %define mova movdqa
+    %define movu movdqu
+    %define movh movq
+    %define movnta movntdq
+    INIT_CPUFLAGS %1
+    DEFINE_MMREGS xmm
+    %if WIN64
+        AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
+    %endif
+%endmacro
+
+%macro INIT_YMM 0-1+
+    %assign avx_enabled 1
+    %define RESET_MM_PERMUTATION INIT_YMM %1
+    %define mmsize 32
+    %define mova movdqa
+    %define movu movdqu
+    %undef movh
+    %define movnta movntdq
+    INIT_CPUFLAGS %1
+    DEFINE_MMREGS ymm
+    AVX512_MM_PERMUTATION
+%endmacro
+
+%macro INIT_ZMM 0-1+
+    %assign avx_enabled 1
+    %define RESET_MM_PERMUTATION INIT_ZMM %1
+    %define mmsize 64
+    %define mova movdqa
+    %define movu movdqu
+    %undef movh
+    %define movnta movntdq
+    INIT_CPUFLAGS %1
+    DEFINE_MMREGS zmm
+    AVX512_MM_PERMUTATION
+%endmacro
+
+INIT_XMM
+
+%macro DECLARE_MMCAST 1
+    %define  mmmm%1   mm%1
+    %define  mmxmm%1  mm%1
+    %define  mmymm%1  mm%1
+    %define  mmzmm%1  mm%1
+    %define xmmmm%1   mm%1
+    %define xmmxmm%1 xmm%1
+    %define xmmymm%1 xmm%1
+    %define xmmzmm%1 xmm%1
+    %define ymmmm%1   mm%1
+    %define ymmxmm%1 xmm%1
+    %define ymmymm%1 ymm%1
+    %define ymmzmm%1 ymm%1
+    %define zmmmm%1   mm%1
+    %define zmmxmm%1 xmm%1
+    %define zmmymm%1 ymm%1
+    %define zmmzmm%1 zmm%1
+    %define xm%1 xmm %+ m%1
+    %define ym%1 ymm %+ m%1
+    %define zm%1 zmm %+ m%1
+%endmacro
+
+%assign i 0
+%rep 32
+    DECLARE_MMCAST i
+    %assign i i+1
+%endrep
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
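+;
+; e.g. after "SWAP 0, 1", anything that refers to m0 uses the register that was
+; previously named m1 (and vice versa), without emitting any instructions.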
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+    %rep %0/2
+        %xdefine %%tmp%2 m%2
+        %rotate 2
+    %endrep
+    %rep %0/2
+        %xdefine m%1 %%tmp%2
+        CAT_XDEFINE nn, m%1, %1
+        %rotate 2
+    %endrep
+%endmacro
+
+%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
+    %ifnum %1 ; SWAP 0, 1, ...
+        SWAP_INTERNAL_NUM %1, %2
+    %else ; SWAP m0, m1, ...
+        SWAP_INTERNAL_NAME %1, %2
+    %endif
+%endmacro
+
+%macro SWAP_INTERNAL_NUM 2-*
+    %rep %0-1
+        %xdefine %%tmp m%1
+        %xdefine m%1 m%2
+        %xdefine m%2 %%tmp
+        CAT_XDEFINE nn, m%1, %1
+        CAT_XDEFINE nn, m%2, %2
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro SWAP_INTERNAL_NAME 2-*
+    %xdefine %%args nn %+ %1
+    %rep %0-1
+        %xdefine %%args %%args, nn %+ %2
+        %rotate 1
+    %endrep
+    SWAP_INTERNAL_NUM %%args
+%endmacro
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 0-1
+    %if %0
+        %xdefine %%f %1_m
+    %else
+        %xdefine %%f current_function %+ _m
+    %endif
+    %assign %%i 0
+    %rep num_mmregs
+        %xdefine %%tmp m %+ %%i
+        CAT_XDEFINE %%f, %%i, regnumof %+ %%tmp
+        %assign %%i %%i+1
+    %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 0-1 ; name to load from
+    %if %0
+        %xdefine %%f %1_m
+    %else
+        %xdefine %%f current_function %+ _m
+    %endif
+    %xdefine %%tmp %%f %+ 0
+    %ifnum %%tmp
+        RESET_MM_PERMUTATION
+        %assign %%i 0
+        %rep num_mmregs
+            %xdefine %%tmp %%f %+ %%i
+            CAT_XDEFINE %%m, %%i, m %+ %%tmp
+            %assign %%i %%i+1
+        %endrep
+        %rep num_mmregs
+            %assign %%i %%i-1
+            CAT_XDEFINE m, %%i, %%m %+ %%i
+            CAT_XDEFINE nn, m %+ %%i, %%i
+        %endrep
+    %endif
+%endmacro
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+    %ifid %1
+        call_internal %1 %+ SUFFIX, %1
+    %else
+        call %1
+    %endif
+%endmacro
+%macro call_internal 2
+    %xdefine %%i %2
+    %ifndef cglobaled_%2
+        %ifdef cglobaled_%1
+            %xdefine %%i %1
+        %endif
+    %endif
+    call %%i
+    LOAD_MM_PERMUTATION %%i
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+    %ifnum %2
+        %if %2==128
+            sub %1, -128
+        %else
+            add %1, %2
+        %endif
+    %else
+        add %1, %2
+    %endif
+%endmacro
+
+%macro sub 2
+    %ifnum %2
+        %if %2==128
+            add %1, -128
+        %else
+            sub %1, %2
+        %endif
+    %else
+        sub %1, %2
+    %endif
+%endmacro
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 32
+    %if i < 8
+        CAT_XDEFINE sizeofmm, i, 8
+        CAT_XDEFINE regnumofmm, i, i
+    %endif
+    CAT_XDEFINE sizeofxmm, i, 16
+    CAT_XDEFINE sizeofymm, i, 32
+    CAT_XDEFINE sizeofzmm, i, 64
+    CAT_XDEFINE regnumofxmm, i, i
+    CAT_XDEFINE regnumofymm, i, i
+    CAT_XDEFINE regnumofzmm, i, i
+    %assign i i+1
+%endrep
+%undef i
+
+%macro CHECK_AVX_INSTR_EMU 3-*
+    %xdefine %%opcode %1
+    %xdefine %%dst %2
+    %rep %0-2
+        %ifidn %%dst, %3
+            %error non-avx emulation of ``%%opcode'' is not supported
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+;%6+: operands
+%macro RUN_AVX_INSTR 6-9+
+    %ifnum sizeof%7
+        %assign __sizeofreg sizeof%7
+    %elifnum sizeof%6
+        %assign __sizeofreg sizeof%6
+    %else
+        %assign __sizeofreg mmsize
+    %endif
+    %assign __emulate_avx 0
+    %if avx_enabled && __sizeofreg >= 16
+        %xdefine __instr v%1
+    %else
+        %xdefine __instr %1
+        %if %0 >= 8+%4
+            %assign __emulate_avx 1
+        %endif
+    %endif
+    %ifnidn %2, fnord
+        %ifdef cpuname
+            %if notcpuflag(%2)
+                %error use of ``%1'' %2 instruction in cpuname function: current_function
+            %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
+                %error use of ``%1'' sse2 instruction in cpuname function: current_function
+            %endif
+        %endif
+    %endif
+
+    %if __emulate_avx
+        %xdefine __src1 %7
+        %xdefine __src2 %8
+        %if %5 && %4 == 0
+            %ifnidn %6, %7
+                %ifidn %6, %8
+                    %xdefine __src1 %8
+                    %xdefine __src2 %7
+                %elifnnum sizeof%8
+                    ; 3-operand AVX instructions with a memory arg can only have it in src2,
+                    ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
+                    ; So, if the instruction is commutative with a memory arg, swap them.
+                    %xdefine __src1 %8
+                    %xdefine __src2 %7
+                %endif
+            %endif
+        %endif
+        %ifnidn %6, __src1
+            %if %0 >= 9
+                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
+            %else
+                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
+            %endif
+            %if __sizeofreg == 8
+                MOVQ %6, __src1
+            %elif %3
+                MOVAPS %6, __src1
+            %else
+                MOVDQA %6, __src1
+            %endif
+        %endif
+        %if %0 >= 9
+            %1 %6, __src2, %9
+        %else
+            %1 %6, __src2
+        %endif
+    %elif %0 >= 9
+        __instr %6, %7, %8, %9
+    %elif %0 == 8
+        %if avx_enabled && %5
+            %xdefine __src1 %7
+            %xdefine __src2 %8
+            %ifnum regnumof%7
+                %ifnum regnumof%8
+                    %if regnumof%7 < 8 && regnumof%8 >= 8 && regnumof%8 < 16 && sizeof%8 <= 32
+                        ; Most VEX-encoded instructions require an additional byte to encode when
+                        ; src2 is a high register (e.g. m8..15). If the instruction is commutative
+                        ; we can swap src1 and src2 when doing so reduces the instruction length.
+                        %xdefine __src1 %8
+                        %xdefine __src2 %7
+                    %endif
+                %endif
+            %endif
+            __instr %6, __src1, __src2
+        %else
+            __instr %6, %7, %8
+        %endif
+    %elif %0 == 7
+        %if avx_enabled && %5
+            %xdefine __src1 %6
+            %xdefine __src2 %7
+            %ifnum regnumof%6
+                %ifnum regnumof%7
+                    %if regnumof%6 < 8 && regnumof%7 >= 8 && regnumof%7 < 16 && sizeof%7 <= 32
+                        %xdefine __src1 %7
+                        %xdefine __src2 %6
+                    %endif
+                %endif
+            %endif
+            __instr %6, __src1, __src2
+        %else
+            __instr %6, %7
+        %endif
+    %else
+        __instr %6
+    %endif
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 1-5 fnord, 0, 255, 0
+    %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
+        %ifidn %2, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
+        %elifidn %3, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
+        %elifidn %4, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
+        %elifidn %5, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
+        %else
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
+        %endif
+    %endmacro
+%endmacro
+
+; Instructions with both VEX/EVEX and legacy encodings
+; Non-destructive instructions are written without parameters
+AVX_INSTR addpd, sse2, 1, 0, 1
+AVX_INSTR addps, sse, 1, 0, 1
+AVX_INSTR addsd, sse2, 1, 0, 0
+AVX_INSTR addss, sse, 1, 0, 0
+AVX_INSTR addsubpd, sse3, 1, 0, 0
+AVX_INSTR addsubps, sse3, 1, 0, 0
+AVX_INSTR aesdec, aesni, 0, 0, 0
+AVX_INSTR aesdeclast, aesni, 0, 0, 0
+AVX_INSTR aesenc, aesni, 0, 0, 0
+AVX_INSTR aesenclast, aesni, 0, 0, 0
+AVX_INSTR aesimc, aesni
+AVX_INSTR aeskeygenassist, aesni
+AVX_INSTR andnpd, sse2, 1, 0, 0
+AVX_INSTR andnps, sse, 1, 0, 0
+AVX_INSTR andpd, sse2, 1, 0, 1
+AVX_INSTR andps, sse, 1, 0, 1
+AVX_INSTR blendpd, sse4, 1, 1, 0
+AVX_INSTR blendps, sse4, 1, 1, 0
+AVX_INSTR blendvpd, sse4 ; can't be emulated
+AVX_INSTR blendvps, sse4 ; can't be emulated
+AVX_INSTR cmpeqpd, sse2, 1, 0, 1
+AVX_INSTR cmpeqps, sse, 1, 0, 1
+AVX_INSTR cmpeqsd, sse2, 1, 0, 0
+AVX_INSTR cmpeqss, sse, 1, 0, 0
+AVX_INSTR cmplepd, sse2, 1, 0, 0
+AVX_INSTR cmpleps, sse, 1, 0, 0
+AVX_INSTR cmplesd, sse2, 1, 0, 0
+AVX_INSTR cmpless, sse, 1, 0, 0
+AVX_INSTR cmpltpd, sse2, 1, 0, 0
+AVX_INSTR cmpltps, sse, 1, 0, 0
+AVX_INSTR cmpltsd, sse2, 1, 0, 0
+AVX_INSTR cmpltss, sse, 1, 0, 0
+AVX_INSTR cmpneqpd, sse2, 1, 0, 1
+AVX_INSTR cmpneqps, sse, 1, 0, 1
+AVX_INSTR cmpneqsd, sse2, 1, 0, 0
+AVX_INSTR cmpneqss, sse, 1, 0, 0
+AVX_INSTR cmpnlepd, sse2, 1, 0, 0
+AVX_INSTR cmpnleps, sse, 1, 0, 0
+AVX_INSTR cmpnlesd, sse2, 1, 0, 0
+AVX_INSTR cmpnless, sse, 1, 0, 0
+AVX_INSTR cmpnltpd, sse2, 1, 0, 0
+AVX_INSTR cmpnltps, sse, 1, 0, 0
+AVX_INSTR cmpnltsd, sse2, 1, 0, 0
+AVX_INSTR cmpnltss, sse, 1, 0, 0
+AVX_INSTR cmpordpd, sse2, 1, 0, 1
+AVX_INSTR cmpordps, sse, 1, 0, 1
+AVX_INSTR cmpordsd, sse2, 1, 0, 0
+AVX_INSTR cmpordss, sse, 1, 0, 0
+AVX_INSTR cmppd, sse2, 1, 1, 0
+AVX_INSTR cmpps, sse, 1, 1, 0
+AVX_INSTR cmpsd, sse2, 1, 1, 0
+AVX_INSTR cmpss, sse, 1, 1, 0
+AVX_INSTR cmpunordpd, sse2, 1, 0, 1
+AVX_INSTR cmpunordps, sse, 1, 0, 1
+AVX_INSTR cmpunordsd, sse2, 1, 0, 0
+AVX_INSTR cmpunordss, sse, 1, 0, 0
+AVX_INSTR comisd, sse2
+AVX_INSTR comiss, sse
+AVX_INSTR cvtdq2pd, sse2
+AVX_INSTR cvtdq2ps, sse2
+AVX_INSTR cvtpd2dq, sse2
+AVX_INSTR cvtpd2ps, sse2
+AVX_INSTR cvtps2dq, sse2
+AVX_INSTR cvtps2pd, sse2
+AVX_INSTR cvtsd2si, sse2
+AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
+AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
+AVX_INSTR cvtsi2ss, sse, 1, 0, 0
+AVX_INSTR cvtss2sd, sse2, 1, 0, 0
+AVX_INSTR cvtss2si, sse
+AVX_INSTR cvttpd2dq, sse2
+AVX_INSTR cvttps2dq, sse2
+AVX_INSTR cvttsd2si, sse2
+AVX_INSTR cvttss2si, sse
+AVX_INSTR divpd, sse2, 1, 0, 0
+AVX_INSTR divps, sse, 1, 0, 0
+AVX_INSTR divsd, sse2, 1, 0, 0
+AVX_INSTR divss, sse, 1, 0, 0
+AVX_INSTR dppd, sse4, 1, 1, 0
+AVX_INSTR dpps, sse4, 1, 1, 0
+AVX_INSTR extractps, sse4
+AVX_INSTR haddpd, sse3, 1, 0, 0
+AVX_INSTR haddps, sse3, 1, 0, 0
+AVX_INSTR hsubpd, sse3, 1, 0, 0
+AVX_INSTR hsubps, sse3, 1, 0, 0
+AVX_INSTR insertps, sse4, 1, 1, 0
+AVX_INSTR lddqu, sse3
+AVX_INSTR ldmxcsr, sse
+AVX_INSTR maskmovdqu, sse2
+AVX_INSTR maxpd, sse2, 1, 0, 1
+AVX_INSTR maxps, sse, 1, 0, 1
+AVX_INSTR maxsd, sse2, 1, 0, 0
+AVX_INSTR maxss, sse, 1, 0, 0
+AVX_INSTR minpd, sse2, 1, 0, 1
+AVX_INSTR minps, sse, 1, 0, 1
+AVX_INSTR minsd, sse2, 1, 0, 0
+AVX_INSTR minss, sse, 1, 0, 0
+AVX_INSTR movapd, sse2
+AVX_INSTR movaps, sse
+AVX_INSTR movd, mmx
+AVX_INSTR movddup, sse3
+AVX_INSTR movdqa, sse2
+AVX_INSTR movdqu, sse2
+AVX_INSTR movhlps, sse, 1, 0, 0
+AVX_INSTR movhpd, sse2, 1, 0, 0
+AVX_INSTR movhps, sse, 1, 0, 0
+AVX_INSTR movlhps, sse, 1, 0, 0
+AVX_INSTR movlpd, sse2, 1, 0, 0
+AVX_INSTR movlps, sse, 1, 0, 0
+AVX_INSTR movmskpd, sse2
+AVX_INSTR movmskps, sse
+AVX_INSTR movntdq, sse2
+AVX_INSTR movntdqa, sse4
+AVX_INSTR movntpd, sse2
+AVX_INSTR movntps, sse
+AVX_INSTR movq, mmx
+AVX_INSTR movsd, sse2, 1, 0, 0
+AVX_INSTR movshdup, sse3
+AVX_INSTR movsldup, sse3
+AVX_INSTR movss, sse, 1, 0, 0
+AVX_INSTR movupd, sse2
+AVX_INSTR movups, sse
+AVX_INSTR mpsadbw, sse4, 0, 1, 0
+AVX_INSTR mulpd, sse2, 1, 0, 1
+AVX_INSTR mulps, sse, 1, 0, 1
+AVX_INSTR mulsd, sse2, 1, 0, 0
+AVX_INSTR mulss, sse, 1, 0, 0
+AVX_INSTR orpd, sse2, 1, 0, 1
+AVX_INSTR orps, sse, 1, 0, 1
+AVX_INSTR pabsb, ssse3
+AVX_INSTR pabsd, ssse3
+AVX_INSTR pabsw, ssse3
+AVX_INSTR packsswb, mmx, 0, 0, 0
+AVX_INSTR packssdw, mmx, 0, 0, 0
+AVX_INSTR packuswb, mmx, 0, 0, 0
+AVX_INSTR packusdw, sse4, 0, 0, 0
+AVX_INSTR paddb, mmx, 0, 0, 1
+AVX_INSTR paddw, mmx, 0, 0, 1
+AVX_INSTR paddd, mmx, 0, 0, 1
+AVX_INSTR paddq, sse2, 0, 0, 1
+AVX_INSTR paddsb, mmx, 0, 0, 1
+AVX_INSTR paddsw, mmx, 0, 0, 1
+AVX_INSTR paddusb, mmx, 0, 0, 1
+AVX_INSTR paddusw, mmx, 0, 0, 1
+AVX_INSTR palignr, ssse3, 0, 1, 0
+AVX_INSTR pand, mmx, 0, 0, 1
+AVX_INSTR pandn, mmx, 0, 0, 0
+AVX_INSTR pavgb, mmx2, 0, 0, 1
+AVX_INSTR pavgw, mmx2, 0, 0, 1
+AVX_INSTR pblendvb, sse4 ; can't be emulated
+AVX_INSTR pblendw, sse4, 0, 1, 0
+AVX_INSTR pclmulqdq, fnord, 0, 1, 0
+AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
+AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
+AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
+AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
+AVX_INSTR pcmpestri, sse42
+AVX_INSTR pcmpestrm, sse42
+AVX_INSTR pcmpistri, sse42
+AVX_INSTR pcmpistrm, sse42
+AVX_INSTR pcmpeqb, mmx, 0, 0, 1
+AVX_INSTR pcmpeqw, mmx, 0, 0, 1
+AVX_INSTR pcmpeqd, mmx, 0, 0, 1
+AVX_INSTR pcmpeqq, sse4, 0, 0, 1
+AVX_INSTR pcmpgtb, mmx, 0, 0, 0
+AVX_INSTR pcmpgtw, mmx, 0, 0, 0
+AVX_INSTR pcmpgtd, mmx, 0, 0, 0
+AVX_INSTR pcmpgtq, sse42, 0, 0, 0
+AVX_INSTR pextrb, sse4
+AVX_INSTR pextrd, sse4
+AVX_INSTR pextrq, sse4
+AVX_INSTR pextrw, mmx2
+AVX_INSTR phaddw, ssse3, 0, 0, 0
+AVX_INSTR phaddd, ssse3, 0, 0, 0
+AVX_INSTR phaddsw, ssse3, 0, 0, 0
+AVX_INSTR phminposuw, sse4
+AVX_INSTR phsubw, ssse3, 0, 0, 0
+AVX_INSTR phsubd, ssse3, 0, 0, 0
+AVX_INSTR phsubsw, ssse3, 0, 0, 0
+AVX_INSTR pinsrb, sse4, 0, 1, 0
+AVX_INSTR pinsrd, sse4, 0, 1, 0
+AVX_INSTR pinsrq, sse4, 0, 1, 0
+AVX_INSTR pinsrw, mmx2, 0, 1, 0
+AVX_INSTR pmaddwd, mmx, 0, 0, 1
+AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
+AVX_INSTR pmaxsb, sse4, 0, 0, 1
+AVX_INSTR pmaxsw, mmx2, 0, 0, 1
+AVX_INSTR pmaxsd, sse4, 0, 0, 1
+AVX_INSTR pmaxub, mmx2, 0, 0, 1
+AVX_INSTR pmaxuw, sse4, 0, 0, 1
+AVX_INSTR pmaxud, sse4, 0, 0, 1
+AVX_INSTR pminsb, sse4, 0, 0, 1
+AVX_INSTR pminsw, mmx2, 0, 0, 1
+AVX_INSTR pminsd, sse4, 0, 0, 1
+AVX_INSTR pminub, mmx2, 0, 0, 1
+AVX_INSTR pminuw, sse4, 0, 0, 1
+AVX_INSTR pminud, sse4, 0, 0, 1
+AVX_INSTR pmovmskb, mmx2
+AVX_INSTR pmovsxbw, sse4
+AVX_INSTR pmovsxbd, sse4
+AVX_INSTR pmovsxbq, sse4
+AVX_INSTR pmovsxwd, sse4
+AVX_INSTR pmovsxwq, sse4
+AVX_INSTR pmovsxdq, sse4
+AVX_INSTR pmovzxbw, sse4
+AVX_INSTR pmovzxbd, sse4
+AVX_INSTR pmovzxbq, sse4
+AVX_INSTR pmovzxwd, sse4
+AVX_INSTR pmovzxwq, sse4
+AVX_INSTR pmovzxdq, sse4
+AVX_INSTR pmuldq, sse4, 0, 0, 1
+AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
+AVX_INSTR pmulhuw, mmx2, 0, 0, 1
+AVX_INSTR pmulhw, mmx, 0, 0, 1
+AVX_INSTR pmullw, mmx, 0, 0, 1
+AVX_INSTR pmulld, sse4, 0, 0, 1
+AVX_INSTR pmuludq, sse2, 0, 0, 1
+AVX_INSTR por, mmx, 0, 0, 1
+AVX_INSTR psadbw, mmx2, 0, 0, 1
+AVX_INSTR pshufb, ssse3, 0, 0, 0
+AVX_INSTR pshufd, sse2
+AVX_INSTR pshufhw, sse2
+AVX_INSTR pshuflw, sse2
+AVX_INSTR psignb, ssse3, 0, 0, 0
+AVX_INSTR psignw, ssse3, 0, 0, 0
+AVX_INSTR psignd, ssse3, 0, 0, 0
+AVX_INSTR psllw, mmx, 0, 0, 0
+AVX_INSTR pslld, mmx, 0, 0, 0
+AVX_INSTR psllq, mmx, 0, 0, 0
+AVX_INSTR pslldq, sse2, 0, 0, 0
+AVX_INSTR psraw, mmx, 0, 0, 0
+AVX_INSTR psrad, mmx, 0, 0, 0
+AVX_INSTR psrlw, mmx, 0, 0, 0
+AVX_INSTR psrld, mmx, 0, 0, 0
+AVX_INSTR psrlq, mmx, 0, 0, 0
+AVX_INSTR psrldq, sse2, 0, 0, 0
+AVX_INSTR psubb, mmx, 0, 0, 0
+AVX_INSTR psubw, mmx, 0, 0, 0
+AVX_INSTR psubd, mmx, 0, 0, 0
+AVX_INSTR psubq, sse2, 0, 0, 0
+AVX_INSTR psubsb, mmx, 0, 0, 0
+AVX_INSTR psubsw, mmx, 0, 0, 0
+AVX_INSTR psubusb, mmx, 0, 0, 0
+AVX_INSTR psubusw, mmx, 0, 0, 0
+AVX_INSTR ptest, sse4
+AVX_INSTR punpckhbw, mmx, 0, 0, 0
+AVX_INSTR punpckhwd, mmx, 0, 0, 0
+AVX_INSTR punpckhdq, mmx, 0, 0, 0
+AVX_INSTR punpckhqdq, sse2, 0, 0, 0
+AVX_INSTR punpcklbw, mmx, 0, 0, 0
+AVX_INSTR punpcklwd, mmx, 0, 0, 0
+AVX_INSTR punpckldq, mmx, 0, 0, 0
+AVX_INSTR punpcklqdq, sse2, 0, 0, 0
+AVX_INSTR pxor, mmx, 0, 0, 1
+AVX_INSTR rcpps, sse
+AVX_INSTR rcpss, sse, 1, 0, 0
+AVX_INSTR roundpd, sse4
+AVX_INSTR roundps, sse4
+AVX_INSTR roundsd, sse4, 1, 1, 0
+AVX_INSTR roundss, sse4, 1, 1, 0
+AVX_INSTR rsqrtps, sse
+AVX_INSTR rsqrtss, sse, 1, 0, 0
+AVX_INSTR shufpd, sse2, 1, 1, 0
+AVX_INSTR shufps, sse, 1, 1, 0
+AVX_INSTR sqrtpd, sse2
+AVX_INSTR sqrtps, sse
+AVX_INSTR sqrtsd, sse2, 1, 0, 0
+AVX_INSTR sqrtss, sse, 1, 0, 0
+AVX_INSTR stmxcsr, sse
+AVX_INSTR subpd, sse2, 1, 0, 0
+AVX_INSTR subps, sse, 1, 0, 0
+AVX_INSTR subsd, sse2, 1, 0, 0
+AVX_INSTR subss, sse, 1, 0, 0
+AVX_INSTR ucomisd, sse2
+AVX_INSTR ucomiss, sse
+AVX_INSTR unpckhpd, sse2, 1, 0, 0
+AVX_INSTR unpckhps, sse, 1, 0, 0
+AVX_INSTR unpcklpd, sse2, 1, 0, 0
+AVX_INSTR unpcklps, sse, 1, 0, 0
+AVX_INSTR xorpd, sse2, 1, 0, 1
+AVX_INSTR xorps, sse, 1, 0, 1
+
+; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
+AVX_INSTR pfadd, 3dnow, 1, 0, 1
+AVX_INSTR pfsub, 3dnow, 1, 0, 0
+AVX_INSTR pfmul, 3dnow, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+    %if j < 10
+        CAT_XDEFINE q000, j, i
+    %elif j < 100
+        CAT_XDEFINE q00, j, i
+    %elif j < 1000
+        CAT_XDEFINE q0, j, i
+    %else
+        CAT_XDEFINE q, j, i
+    %endif
+    %assign i i+1
+%endrep
+%undef i
+%undef j
+
+%macro FMA_INSTR 3
+    %macro %1 4-7 %1, %2, %3
+        %if cpuflag(xop)
+            v%5 %1, %2, %3, %4
+        %elifnidn %1, %4
+            %6 %1, %2, %3
+            %7 %1, %4
+        %else
+            %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
+        %endif
+    %endmacro
+%endmacro
+
+FMA_INSTR  pmacsww,  pmullw, paddw
+FMA_INSTR  pmacsdd,  pmulld, paddd ; sse4 emulation
+FMA_INSTR pmacsdql,  pmuldq, paddq ; sse4 emulation
+FMA_INSTR pmadcswd, pmaddwd, paddd
+
+; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
+; FMA3 is only possible if dst is the same as one of the src registers.
+; Either src2 or src3 can be a memory operand.
+%macro FMA4_INSTR 2-*
+    %push fma4_instr
+    %xdefine %$prefix %1
+    %rep %0 - 1
+        %macro %$prefix%2 4-6 %$prefix, %2
+            %if notcpuflag(fma3) && notcpuflag(fma4)
+                %error use of ``%5%6'' fma instruction in cpuname function: current_function
+            %elif cpuflag(fma4)
+                v%5%6 %1, %2, %3, %4
+            %elifidn %1, %2
+                ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
+                %ifnum sizeof%3
+                    v%{5}213%6 %2, %3, %4
+                %else
+                    v%{5}132%6 %2, %4, %3
+                %endif
+            %elifidn %1, %3
+                v%{5}213%6 %3, %2, %4
+            %elifidn %1, %4
+                v%{5}231%6 %4, %2, %3
+            %else
+                %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
+            %endif
+        %endmacro
+        %rotate 1
+    %endrep
+    %pop
+%endmacro
+
+FMA4_INSTR fmadd,    pd, ps, sd, ss
+FMA4_INSTR fmaddsub, pd, ps
+FMA4_INSTR fmsub,    pd, ps, sd, ss
+FMA4_INSTR fmsubadd, pd, ps
+FMA4_INSTR fnmadd,   pd, ps, sd, ss
+FMA4_INSTR fnmsub,   pd, ps, sd, ss
+
+; Macros for converting VEX instructions to equivalent EVEX ones.
+%macro EVEX_INSTR 2-3 0 ; vex, evex, prefer_evex
+    %macro %1 2-7 fnord, fnord, %1, %2, %3
+        %ifidn %3, fnord
+            %define %%args %1, %2
+        %elifidn %4, fnord
+            %define %%args %1, %2, %3
+        %else
+            %define %%args %1, %2, %3, %4
+        %endif
+        %assign %%evex_required cpuflag(avx512) & %7
+        %ifnum regnumof%1
+            %if regnumof%1 >= 16 || sizeof%1 > 32
+                %assign %%evex_required 1
+            %endif
+        %endif
+        %ifnum regnumof%2
+            %if regnumof%2 >= 16 || sizeof%2 > 32
+                %assign %%evex_required 1
+            %endif
+        %endif
+        %ifnum regnumof%3
+            %if regnumof%3 >= 16 || sizeof%3 > 32
+                %assign %%evex_required 1
+            %endif
+        %endif
+        %if %%evex_required
+            %6 %%args
+        %else
+            %5 %%args ; Prefer VEX over EVEX due to shorter instruction length
+        %endif
+    %endmacro
+%endmacro
+
+EVEX_INSTR vbroadcastf128, vbroadcastf32x4
+EVEX_INSTR vbroadcasti128, vbroadcasti32x4
+EVEX_INSTR vextractf128,   vextractf32x4
+EVEX_INSTR vextracti128,   vextracti32x4
+EVEX_INSTR vinsertf128,    vinsertf32x4
+EVEX_INSTR vinserti128,    vinserti32x4
+EVEX_INSTR vmovdqa,        vmovdqa32
+EVEX_INSTR vmovdqu,        vmovdqu32
+EVEX_INSTR vpand,          vpandd
+EVEX_INSTR vpandn,         vpandnd
+EVEX_INSTR vpor,           vpord
+EVEX_INSTR vpxor,          vpxord
+EVEX_INSTR vrcpps,         vrcp14ps,   1 ; EVEX versions have higher precision
+EVEX_INSTR vrcpss,         vrcp14ss,   1
+EVEX_INSTR vrsqrtps,       vrsqrt14ps, 1
+EVEX_INSTR vrsqrtss,       vrsqrt14ss, 1
diff --git a/src/x86/mc.asm b/src/x86/mc.asm
new file mode 100644
index 0000000000000000000000000000000000000000..fa8db78f30b16222b5ea0ecc71f987dc0be89757
--- /dev/null
+++ b/src/x86/mc.asm
@@ -0,0 +1,3072 @@
+; Copyright © 2018, VideoLAN and dav1d authors
+; Copyright © 2018, Two Orioles, LLC
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice, this
+;    list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright notice,
+;    this list of conditions and the following disclaimer in the documentation
+;    and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+%include "config.asm"
+%include "ext/x86/x86inc.asm"
+
+%if ARCH_X86_64
+
+SECTION_RODATA 32
+
+subpel_h_shuf4: db 0,  1,  2,  3,  1,  2,  3,  4,  8,  9, 10, 11,  9, 10, 11, 12
+                db 2,  3,  4,  5,  3,  4,  5,  6, 10, 11, 12, 13, 11, 12, 13, 14
+subpel_h_shufA: db 0,  1,  2,  3,  1,  2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6
+subpel_h_shufB: db 4,  5,  6,  7,  5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10
+subpel_h_shufC: db 8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
+bilin_h_shuf4:  db 1,  0,  2,  1,  3,  2,  4,  3,  9,  8, 10,  9, 11, 10, 12, 11
+bilin_h_shuf8:  db 1,  0,  2,  1,  3,  2,  4,  3,  5,  4,  6,  5,  7,  6,  8,  7
+deint_shuf4:    db 0,  4,  1,  5,  2,  6,  3,  7,  4,  8,  5,  9,  6, 10,  7, 11
+
+pw_8:    times 2 dw 8
+pw_26:   times 2 dw 26
+pw_34:   times 2 dw 34
+pw_258:  times 2 dw 258
+pw_512:  times 2 dw 512
+pw_1024: times 2 dw 1024
+pw_2048: times 2 dw 2048
+pw_8192: times 2 dw 8192
+pd_32:   dd 32
+pd_512:  dd 512
+
+cextern mc_subpel_filters
+%define subpel_filters (mangle(private_prefix %+ _mc_subpel_filters)-8)
+
+%macro BIDIR_JMP_TABLE 1-7 4, 8, 16, 32, 64, 128
+    %xdefine %1_table (%%table - 2*4)
+    %xdefine %%prefix mangle(private_prefix %+ _%1)
+    %%table:
+    %rep 6
+        dd %%prefix %+ .w%2 - (%%table - 2*4)
+        %rotate 1
+    %endrep
+%endmacro
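+; e.g. BIDIR_JMP_TABLE avg_avx2 below emits avg_avx2_table: six dword offsets
+; to avg_avx2.w4 .. .w128; the 2*4 bias means an index of log2(4) lands on the
+; first entry.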
+
+BIDIR_JMP_TABLE avg_avx2
+BIDIR_JMP_TABLE w_avg_avx2
+BIDIR_JMP_TABLE mask_avx2
+BIDIR_JMP_TABLE w_mask_420_avx2
+
+%macro BASE_JMP_TABLE 3-*
+    %xdefine %1_%2_table (%%table - %3)
+    %xdefine %%base %1_%2
+    %%table:
+    %rep %0 - 2
+        dw %%base %+ _w%3 - %%base
+        %rotate 1
+    %endrep
+%endmacro
+
+%xdefine put_avx2 mangle(private_prefix %+ _put_bilin_avx2.put)
+%xdefine prep_avx2 mangle(private_prefix %+ _prep_bilin_avx2.prep)
+
+BASE_JMP_TABLE put,  avx2, 2, 4, 8, 16, 32, 64, 128
+BASE_JMP_TABLE prep, avx2,    4, 8, 16, 32, 64, 128
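+; put_avx2_table/prep_avx2_table hold word-sized offsets of the width handlers
+; (.put_w2 .. .put_w128, .prep_w4 .. .prep_w128) relative to the .put/.prep
+; labels; the dispatch code loads an entry indexed by tzcnt(w) and adds it to
+; t2 (which holds put_avx2/prep_avx2) before jumping.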
+
+%macro HV_JMP_TABLE 5-*
+    %xdefine %%prefix mangle(private_prefix %+ _%1_%2_%3)
+    %xdefine %%base %1_%3
+    %assign %%types %4
+    %if %%types & 1
+        %xdefine %1_%2_h_%3_table  (%%h  - %5)
+        %%h:
+        %rep %0 - 4
+            dw %%prefix %+ .h_w%5 - %%base
+            %rotate 1
+        %endrep
+        %rotate 4
+    %endif
+    %if %%types & 2
+        %xdefine %1_%2_v_%3_table  (%%v  - %5)
+        %%v:
+        %rep %0 - 4
+            dw %%prefix %+ .v_w%5 - %%base
+            %rotate 1
+        %endrep
+        %rotate 4
+    %endif
+    %if %%types & 4
+        %xdefine %1_%2_hv_%3_table (%%hv - %5)
+        %%hv:
+        %rep %0 - 4
+            dw %%prefix %+ .hv_w%5 - %%base
+            %rotate 1
+        %endrep
+    %endif
+%endmacro
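+; The 4th argument is a bitmask selecting which jump tables to emit: bit 0 for
+; the h-only path, bit 1 for v-only, bit 2 for hv; the remaining arguments are
+; the block widths. e.g. 7 builds all three tables for bilin, 3 builds h and v
+; for put_8tap.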
+
+HV_JMP_TABLE put,  8tap,  avx2, 3, 2, 4, 8, 16, 32, 64, 128
+HV_JMP_TABLE prep, 8tap,  avx2, 1,    4, 8, 16, 32, 64, 128
+HV_JMP_TABLE put,  bilin, avx2, 7, 2, 4, 8, 16, 32, 64, 128
+HV_JMP_TABLE prep, bilin, avx2, 7,    4, 8, 16, 32, 64, 128
+
+%define table_offset(type, fn) type %+ fn %+ SUFFIX %+ _table - type %+ SUFFIX
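+; e.g. table_offset(put, _bilin_h) expands to put_bilin_h_avx2_table - put_avx2
+; when assembling the avx2 code below (SUFFIX = _avx2).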
+
+SECTION .text
+
+INIT_XMM avx2
+DECLARE_REG_TMP 4, 6, 7
+cglobal put_bilin, 4, 8, 0, dst, ds, src, ss, w, h, mxy
+    movifnidn          mxyd, r6m ; mx
+    lea                  t2, [put_avx2]
+    tzcnt                wd, wm
+    movifnidn            hd, hm
+    test               mxyd, mxyd
+    jnz .h
+    mov                mxyd, r7m ; my
+    test               mxyd, mxyd
+    jnz .v
+.put:
+    movzx                wd, word [t2+wq*2+table_offset(put,)]
+    add                  wq, t2
+    lea                  t1, [ssq*3]
+    lea                  t2, [dsq*3]
+    jmp                  wq
+.put_w2:
+    movzx               t0d, word [srcq+ssq*0]
+    movzx               t1d, word [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    mov        [dstq+dsq*0], t0w
+    mov        [dstq+dsq*1], t1w
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .put_w2
+    RET
+.put_w4:
+    mov                 t0d, [srcq+ssq*0]
+    mov                 t1d, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    mov        [dstq+dsq*0], t0d
+    mov        [dstq+dsq*1], t1d
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .put_w4
+    RET
+.put_w8:
+    movq                 m0, [srcq+ssq*0]
+    movq                 m1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    movq       [dstq+dsq*0], m0
+    movq       [dstq+dsq*1], m1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .put_w8
+    RET
+.put_w16:
+    movu                 m0, [srcq+ssq*0]
+    movu                 m1, [srcq+ssq*1]
+    movu                 m2, [srcq+ssq*2]
+    movu                 m3, [srcq+t1   ]
+    lea                srcq, [srcq+ssq*4]
+    mova       [dstq+dsq*0], m0
+    mova       [dstq+dsq*1], m1
+    mova       [dstq+dsq*2], m2
+    mova       [dstq+t2   ], m3
+    lea                dstq, [dstq+dsq*4]
+    sub                  hd, 4
+    jg .put_w16
+    RET
+INIT_YMM avx2
+.put_w32:
+    movu                 m0, [srcq+ssq*0]
+    movu                 m1, [srcq+ssq*1]
+    movu                 m2, [srcq+ssq*2]
+    movu                 m3, [srcq+t1   ]
+    lea                srcq, [srcq+ssq*4]
+    mova       [dstq+dsq*0], m0
+    mova       [dstq+dsq*1], m1
+    mova       [dstq+dsq*2], m2
+    mova       [dstq+t2   ], m3
+    lea                dstq, [dstq+dsq*4]
+    sub                  hd, 4
+    jg .put_w32
+    RET
+.put_w64:
+    movu                 m0, [srcq+ssq*0+32*0]
+    movu                 m1, [srcq+ssq*0+32*1]
+    movu                 m2, [srcq+ssq*1+32*0]
+    movu                 m3, [srcq+ssq*1+32*1]
+    lea                srcq, [srcq+ssq*2]
+    mova  [dstq+dsq*0+32*0], m0
+    mova  [dstq+dsq*0+32*1], m1
+    mova  [dstq+dsq*1+32*0], m2
+    mova  [dstq+dsq*1+32*1], m3
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .put_w64
+    RET
+.put_w128:
+    movu                 m0, [srcq+32*0]
+    movu                 m1, [srcq+32*1]
+    movu                 m2, [srcq+32*2]
+    movu                 m3, [srcq+32*3]
+    add                srcq, ssq
+    mova        [dstq+32*0], m0
+    mova        [dstq+32*1], m1
+    mova        [dstq+32*2], m2
+    mova        [dstq+32*3], m3
+    add                dstq, dsq
+    dec                  hd
+    jg .put_w128
+    RET
+.h:
+    ; (16 * src[x] + (mx * (src[x + 1] - src[x])) + 8) >> 4
+    ; = ((16 - mx) * src[x] + mx * src[x + 1] + 8) >> 4
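+    ; e.g. mx = 8, src[x] = 10, src[x+1] = 20: (8*10 + 8*20 + 8) >> 4 = 15.
+    ; The imul/add below packs (mx, 16 - mx) into each weight word so one
+    ; pmaddubsw applies both taps; pmulhrsw with pw_2048 then performs the
+    ; rounding (+8) >> 4 shift.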
+    imul               mxyd, 0xff01
+    vbroadcasti128       m4, [bilin_h_shuf8]
+    WIN64_SPILL_XMM       7
+    add                mxyd, 16 << 8
+    movd                xm5, mxyd
+    mov                mxyd, r7m ; my
+    vpbroadcastw         m5, xm5
+    test               mxyd, mxyd
+    jnz .hv
+    movzx                wd, word [t2+wq*2+table_offset(put, _bilin_h)]
+    vpbroadcastd         m6, [pw_2048]
+    add                  wq, t2
+    jmp                  wq
+.h_w2:
+    movd                xm0, [srcq+ssq*0]
+    pinsrd              xm0, [srcq+ssq*1], 1
+    lea                srcq, [srcq+ssq*2]
+    pshufb              xm0, xm4
+    pmaddubsw           xm0, xm5
+    pmulhrsw            xm0, xm6
+    packuswb            xm0, xm0
+    pextrw     [dstq+dsq*0], xm0, 0
+    pextrw     [dstq+dsq*1], xm0, 2
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w2
+    RET
+.h_w4:
+    mova                xm4, [bilin_h_shuf4]
+.h_w4_loop:
+    movq                xm0, [srcq+ssq*0]
+    movhps              xm0, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    pshufb              xm0, xm4
+    pmaddubsw           xm0, xm5
+    pmulhrsw            xm0, xm6
+    packuswb            xm0, xm0
+    movd       [dstq+dsq*0], xm0
+    pextrd     [dstq+dsq*1], xm0, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w4_loop
+    RET
+.h_w8:
+    movu                xm0, [srcq+ssq*0]
+    movu                xm1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    pshufb              xm0, xm4
+    pshufb              xm1, xm4
+    pmaddubsw           xm0, xm5
+    pmaddubsw           xm1, xm5
+    pmulhrsw            xm0, xm6
+    pmulhrsw            xm1, xm6
+    packuswb            xm0, xm1
+    movq       [dstq+dsq*0], xm0
+    movhps     [dstq+dsq*1], xm0
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w8
+    RET
+.h_w16:
+    movu                xm0,     [srcq+ssq*0+8*0]
+    vinserti128          m0, m0, [srcq+ssq*1+8*0], 1
+    movu                xm1,     [srcq+ssq*0+8*1]
+    vinserti128          m1, m1, [srcq+ssq*1+8*1], 1
+    lea                srcq,     [srcq+ssq*2]
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmulhrsw             m0, m6
+    pmulhrsw             m1, m6
+    packuswb             m0, m1
+    mova         [dstq+dsq*0], xm0
+    vextracti128 [dstq+dsq*1], m0, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w16
+    RET
+.h_w32:
+    movu                 m0, [srcq+8*0]
+    movu                 m1, [srcq+8*1]
+    add                srcq, ssq
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmulhrsw             m0, m6
+    pmulhrsw             m1, m6
+    packuswb             m0, m1
+    mova             [dstq], m0
+    add                dstq, dsq
+    dec                  hd
+    jg .h_w32
+    RET
+.h_w64:
+    movu                 m0, [srcq+8*0]
+    movu                 m1, [srcq+8*1]
+    movu                 m2, [srcq+8*4]
+    movu                 m3, [srcq+8*5]
+    add                srcq, ssq
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmaddubsw            m2, m5
+    pmaddubsw            m3, m5
+    pmulhrsw             m0, m6
+    pmulhrsw             m1, m6
+    pmulhrsw             m2, m6
+    pmulhrsw             m3, m6
+    packuswb             m0, m1
+    packuswb             m2, m3
+    mova        [dstq+32*0], m0
+    mova        [dstq+32*1], m2
+    add                dstq, dsq
+    dec                  hd
+    jg .h_w64
+    RET
+.h_w128:
+    mov                  t1, -32*3
+.h_w128_loop:
+    movu                 m0, [srcq+t1+32*3+8*0]
+    movu                 m1, [srcq+t1+32*3+8*1]
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmulhrsw             m0, m6
+    pmulhrsw             m1, m6
+    packuswb             m0, m1
+    mova     [dstq+t1+32*3], m0
+    add                  t1, 32
+    jle .h_w128_loop
+    add                srcq, ssq
+    add                dstq, dsq
+    dec                  hd
+    jg .h_w128
+    RET
+.v:
+    movzx                wd, word [t2+wq*2+table_offset(put, _bilin_v)]
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM       8
+    imul               mxyd, 0xff01
+    vpbroadcastd         m7, [pw_2048]
+    add                mxyd, 16 << 8
+    add                  wq, t2
+    movd                xm6, mxyd
+    vpbroadcastw         m6, xm6
+    jmp                  wq
+.v_w2:
+    movd                xm0,      [srcq+ssq*0]
+.v_w2_loop:
+    pinsrw              xm1, xm0, [srcq+ssq*1], 1 ; 0 1
+    lea                srcq,      [srcq+ssq*2]
+    pinsrw              xm0, xm1, [srcq+ssq*0], 0 ; 2 1
+    pshuflw             xm1, xm1, q2301           ; 1 0
+    punpcklbw           xm1, xm0, xm1
+    pmaddubsw           xm1, xm6
+    pmulhrsw            xm1, xm7
+    packuswb            xm1, xm1
+    pextrw     [dstq+dsq*0], xm1, 1
+    pextrw     [dstq+dsq*1], xm1, 0
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w2_loop
+    RET
+.v_w4:
+    movd                xm0, [srcq+ssq*0]
+.v_w4_loop:
+    vpbroadcastd        xm1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd            xm2, xm1, xm0, 0x01 ; 0 1
+    vpbroadcastd        xm0, [srcq+ssq*0]
+    vpblendd            xm1, xm1, xm0, 0x02 ; 1 2
+    punpcklbw           xm1, xm2
+    pmaddubsw           xm1, xm6
+    pmulhrsw            xm1, xm7
+    packuswb            xm1, xm1
+    movd       [dstq+dsq*0], xm1
+    pextrd     [dstq+dsq*1], xm1, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w4_loop
+    RET
+.v_w8:
+    movq                xm0, [srcq+ssq*0]
+.v_w8_loop:
+    vpbroadcastq        xm1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd            xm2, xm1, xm0, 0x03 ; 0 1
+    vpbroadcastq        xm0, [srcq+ssq*0]
+    vpblendd            xm1, xm1, xm0, 0x0c ; 1 2
+    punpcklbw           xm3, xm1, xm2
+    punpckhbw           xm1, xm2
+    pmaddubsw           xm3, xm6
+    pmaddubsw           xm1, xm6
+    pmulhrsw            xm3, xm7
+    pmulhrsw            xm1, xm7
+    packuswb            xm3, xm1
+    movq       [dstq+dsq*0], xm3
+    movhps     [dstq+dsq*1], xm3
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w8_loop
+    RET
+.v_w16:
+    movu                xm0, [srcq+ssq*0]
+.v_w16_loop:
+    vbroadcasti128       m2, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd             m3, m2, m0, 0x0f ; 0 1
+    vbroadcasti128       m0, [srcq+ssq*0]
+    vpblendd             m2, m2, m0, 0xf0 ; 1 2
+    punpcklbw            m1, m2, m3
+    punpckhbw            m2, m3
+    pmaddubsw            m1, m6
+    pmaddubsw            m2, m6
+    pmulhrsw             m1, m7
+    pmulhrsw             m2, m7
+    packuswb             m1, m2
+    mova         [dstq+dsq*0], xm1
+    vextracti128 [dstq+dsq*1], m1, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w16_loop
+    RET
+.v_w32:
+%macro PUT_BILIN_V_W32 0
+    movu                 m0, [srcq+ssq*0]
+%%loop:
+    movu                 m4, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    punpcklbw            m1, m4, m0
+    punpckhbw            m3, m4, m0
+    movu                 m0, [srcq+ssq*0]
+    punpcklbw            m2, m0, m4
+    punpckhbw            m4, m0, m4
+    pmaddubsw            m1, m6
+    pmaddubsw            m3, m6
+    pmaddubsw            m2, m6
+    pmaddubsw            m4, m6
+    pmulhrsw             m1, m7
+    pmulhrsw             m3, m7
+    pmulhrsw             m2, m7
+    pmulhrsw             m4, m7
+    packuswb             m1, m3
+    packuswb             m2, m4
+    mova       [dstq+dsq*0], m1
+    mova       [dstq+dsq*1], m2
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg %%loop
+%endmacro
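+; .v_w128 re-runs this macro over four 32-pixel-wide columns; t2d keeps the
+; column countdown in its upper bits and the original h in its low byte.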
+    PUT_BILIN_V_W32
+    RET
+.v_w64:
+    movu                 m0, [srcq+32*0]
+    movu                 m1, [srcq+32*1]
+.v_w64_loop:
+    add                srcq, ssq
+    movu                 m3, [srcq+32*0]
+    movu                 m4, [srcq+32*1]
+    punpcklbw            m2, m3, m0
+    punpckhbw            m5, m3, m0
+    pmaddubsw            m2, m6
+    pmaddubsw            m5, m6
+    mova                 m0, m3
+    pmulhrsw             m2, m7
+    pmulhrsw             m5, m7
+    packuswb             m2, m5
+    punpcklbw            m3, m4, m1
+    punpckhbw            m5, m4, m1
+    pmaddubsw            m3, m6
+    pmaddubsw            m5, m6
+    mova                 m1, m4
+    pmulhrsw             m3, m7
+    pmulhrsw             m5, m7
+    packuswb             m3, m5
+    mova        [dstq+32*0], m2
+    mova        [dstq+32*1], m3
+    add                dstq, dsq
+    dec                  hd
+    jg .v_w64_loop
+    RET
+.v_w128:
+    mov                  t0, dstq
+    mov                  t1, srcq
+    lea                 t2d, [hq+(3<<8)]
+.v_w128_loop:
+    PUT_BILIN_V_W32
+    mov                  hb, t2b
+    add                  t0, 32
+    add                  t1, 32
+    mov                dstq, t0
+    mov                srcq, t1
+    sub                 t2d, 1<<8
+    jg .v_w128_loop
+    RET
+.hv:
+    ; (16 * src[x] + (my * (src[x + src_stride] - src[x])) + 128) >> 8
+    ; = (src[x] + ((my * (src[x + src_stride] - src[x])) >> 4) + 8) >> 4
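+    ; my is pre-scaled by 2^11 below so that pmulhw on the doubled difference
+    ; yields (my * diff) >> 4; the final pmulhrsw with pw_2048 then applies
+    ; the (+8) >> 4 rounding from the formula above.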
+    movzx                wd, word [t2+wq*2+table_offset(put, _bilin_hv)]
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM       8
+    shl                mxyd, 11 ; can't shift by 12 due to signed overflow
+    vpbroadcastd         m7, [pw_2048]
+    movd                xm6, mxyd
+    add                  wq, t2
+    vpbroadcastw         m6, xm6
+    jmp                  wq
+.hv_w2:
+    vpbroadcastd        xm0, [srcq+ssq*0]
+    pshufb              xm0, xm4
+    pmaddubsw           xm0, xm5
+.hv_w2_loop:
+    movd                xm1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    pinsrd              xm1, [srcq+ssq*0], 1
+    pshufb              xm1, xm4
+    pmaddubsw           xm1, xm5             ; 1 _ 2 _
+    shufps              xm2, xm0, xm1, q1032 ; 0 _ 1 _
+    mova                xm0, xm1
+    psubw               xm1, xm2
+    paddw               xm1, xm1
+    pmulhw              xm1, xm6
+    paddw               xm1, xm2
+    pmulhrsw            xm1, xm7
+    packuswb            xm1, xm1
+    pextrw     [dstq+dsq*0], xm1, 0
+    pextrw     [dstq+dsq*1], xm1, 2
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w2_loop
+    RET
+.hv_w4:
+    mova                xm4, [bilin_h_shuf4]
+    movddup             xm0, [srcq+ssq*0]
+    pshufb              xm0, xm4
+    pmaddubsw           xm0, xm5
+.hv_w4_loop:
+    movq                xm1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    movhps              xm1, [srcq+ssq*0]
+    pshufb              xm1, xm4
+    pmaddubsw           xm1, xm5             ; 1 2
+    shufps              xm2, xm0, xm1, q1032 ; 0 1
+    mova                xm0, xm1
+    psubw               xm1, xm2
+    paddw               xm1, xm1
+    pmulhw              xm1, xm6
+    paddw               xm1, xm2
+    pmulhrsw            xm1, xm7
+    packuswb            xm1, xm1
+    movd       [dstq+dsq*0], xm1
+    pextrd     [dstq+dsq*1], xm1, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w4_loop
+    RET
+.hv_w8:
+    vbroadcasti128       m0,     [srcq+ssq*0]
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w8_loop:
+    movu                xm1,     [srcq+ssq*1]
+    lea                srcq,     [srcq+ssq*2]
+    vinserti128          m1, m1, [srcq+ssq*0], 1
+    pshufb               m1, m4
+    pmaddubsw            m1, m5           ; 1 2
+    vperm2i128           m2, m0, m1, 0x21 ; 0 1
+    mova                 m0, m1
+    psubw                m1, m2
+    paddw                m1, m1
+    pmulhw               m1, m6
+    paddw                m1, m2
+    pmulhrsw             m1, m7
+    vextracti128        xm2, m1, 1
+    packuswb            xm1, xm2
+    movq       [dstq+dsq*0], xm1
+    movhps     [dstq+dsq*1], xm1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w8_loop
+    RET
+.hv_w16:
+    movu                 m0,     [srcq+ssq*0+8*0]
+    vinserti128          m0, m0, [srcq+ssq*0+8*1], 1
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w16_loop:
+    movu                xm2,     [srcq+ssq*1+8*0]
+    vinserti128          m2, m2, [srcq+ssq*1+8*1], 1
+    lea                srcq,     [srcq+ssq*2]
+    movu                xm3,     [srcq+ssq*0+8*0]
+    vinserti128          m3, m3, [srcq+ssq*0+8*1], 1
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m2, m5
+    psubw                m1, m2, m0
+    paddw                m1, m1
+    pmulhw               m1, m6
+    paddw                m1, m0
+    pmaddubsw            m0, m3, m5
+    psubw                m3, m0, m2
+    paddw                m3, m3
+    pmulhw               m3, m6
+    paddw                m3, m2
+    pmulhrsw             m1, m7
+    pmulhrsw             m3, m7
+    packuswb             m1, m3
+    vpermq               m1, m1, q3120
+    mova         [dstq+dsq*0], xm1
+    vextracti128 [dstq+dsq*1], m1, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w16_loop
+    RET
+.hv_w32:
+%macro PUT_BILIN_HV_W32 0
+    movu                 m0,     [srcq+8*0]
+    vinserti128          m0, m0, [srcq+8*2], 1
+    movu                 m1,     [srcq+8*1]
+    vinserti128          m1, m1, [srcq+8*3], 1
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+%if WIN64
+    movaps              r4m, xmm8
+%endif
+%%loop:
+    add                srcq, ssq
+    movu                xm2,     [srcq+8*1]
+    vinserti128          m2, m2, [srcq+8*3], 1
+    pshufb               m2, m4
+    pmaddubsw            m2, m5
+    psubw                m3, m2, m1
+    paddw                m3, m3
+    pmulhw               m3, m6
+    paddw                m3, m1
+    mova                 m1, m2
+    pmulhrsw             m8, m3, m7
+    movu                xm2,     [srcq+8*0]
+    vinserti128          m2, m2, [srcq+8*2], 1
+    pshufb               m2, m4
+    pmaddubsw            m2, m5
+    psubw                m3, m2, m0
+    paddw                m3, m3
+    pmulhw               m3, m6
+    paddw                m3, m0
+    mova                 m0, m2
+    pmulhrsw             m3, m7
+    packuswb             m3, m8
+    mova             [dstq], m3
+    add                dstq, dsq
+    dec                  hd
+    jg %%loop
+%if WIN64
+    movaps             xmm8, r4m
+%endif
+%endmacro
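+; .hv_w64 and .hv_w128 re-run this macro over two and four 32-pixel-wide
+; columns respectively, using the same t2d column-countdown trick as .v_w128.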
+    PUT_BILIN_HV_W32
+    RET
+.hv_w64:
+    mov                  t0, dstq
+    mov                  t1, srcq
+    lea                 t2d, [hq+(1<<8)]
+.hv_w64_loop:
+    PUT_BILIN_HV_W32
+    mov                  hb, t2b
+    add                  t0, 32
+    add                  t1, 32
+    mov                dstq, t0
+    mov                srcq, t1
+    sub                 t2d, 1<<8
+    jg .hv_w64_loop
+    RET
+.hv_w128:
+    mov                  t0, dstq
+    mov                  t1, srcq
+    lea                 t2d, [hq+(3<<8)]
+.hv_w128_loop:
+    PUT_BILIN_HV_W32
+    mov                  hb, t2b
+    add                  t0, 32
+    add                  t1, 32
+    mov                dstq, t0
+    mov                srcq, t1
+    sub                 t2d, 1<<8
+    jg .hv_w128_loop
+    RET
+
+DECLARE_REG_TMP 3, 5, 6
+cglobal prep_bilin, 3, 7, 0, tmp, src, stride, w, h, mxy, stride3
+    movifnidn          mxyd, r5m ; mx
+    lea                  t2, [prep_avx2]
+    tzcnt                wd, wm
+    movifnidn            hd, hm
+    test               mxyd, mxyd
+    jnz .h
+    mov                mxyd, r6m ; my
+    test               mxyd, mxyd
+    jnz .v
+.prep:
+    movzx                wd, word [t2+wq*2+table_offset(prep,)]
+    add                  wq, t2
+    lea            stride3q, [strideq*3]
+    jmp                  wq
+.prep_w4:
+    movd                xm0, [srcq+strideq*0]
+    pinsrd              xm0, [srcq+strideq*1], 1
+    pinsrd              xm0, [srcq+strideq*2], 2
+    pinsrd              xm0, [srcq+stride3q ], 3
+    lea                srcq, [srcq+strideq*4]
+    pmovzxbw             m0, xm0
+    psllw                m0, 4
+    mova             [tmpq], m0
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .prep_w4
+    RET
+.prep_w8:
+    movq                xm0, [srcq+strideq*0]
+    movhps              xm0, [srcq+strideq*1]
+    movq                xm1, [srcq+strideq*2]
+    movhps              xm1, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    pmovzxbw             m0, xm0
+    pmovzxbw             m1, xm1
+    psllw                m0, 4
+    psllw                m1, 4
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    add                tmpq, 32*2
+    sub                  hd, 4
+    jg .prep_w8
+    RET
+.prep_w16:
+    pmovzxbw             m0, [srcq+strideq*0]
+    pmovzxbw             m1, [srcq+strideq*1]
+    pmovzxbw             m2, [srcq+strideq*2]
+    pmovzxbw             m3, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    psllw                m0, 4
+    psllw                m1, 4
+    psllw                m2, 4
+    psllw                m3, 4
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    add                tmpq, 32*4
+    sub                  hd, 4
+    jg .prep_w16
+    RET
+.prep_w32:
+    pmovzxbw             m0, [srcq+strideq*0+16*0]
+    pmovzxbw             m1, [srcq+strideq*0+16*1]
+    pmovzxbw             m2, [srcq+strideq*1+16*0]
+    pmovzxbw             m3, [srcq+strideq*1+16*1]
+    lea                srcq, [srcq+strideq*2]
+    psllw                m0, 4
+    psllw                m1, 4
+    psllw                m2, 4
+    psllw                m3, 4
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    add                tmpq, 32*4
+    sub                  hd, 2
+    jg .prep_w32
+    RET
+.prep_w64:
+    pmovzxbw             m0, [srcq+16*0]
+    pmovzxbw             m1, [srcq+16*1]
+    pmovzxbw             m2, [srcq+16*2]
+    pmovzxbw             m3, [srcq+16*3]
+    add                srcq, strideq
+    psllw                m0, 4
+    psllw                m1, 4
+    psllw                m2, 4
+    psllw                m3, 4
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    add                tmpq, 32*4
+    dec                  hd
+    jg .prep_w64
+    RET
+.prep_w128:
+    pmovzxbw             m0, [srcq+16*0]
+    pmovzxbw             m1, [srcq+16*1]
+    pmovzxbw             m2, [srcq+16*2]
+    pmovzxbw             m3, [srcq+16*3]
+    psllw                m0, 4
+    psllw                m1, 4
+    psllw                m2, 4
+    psllw                m3, 4
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    pmovzxbw             m0, [srcq+16*4]
+    pmovzxbw             m1, [srcq+16*5]
+    pmovzxbw             m2, [srcq+16*6]
+    pmovzxbw             m3, [srcq+16*7]
+    add                tmpq, 32*8
+    add                srcq, strideq
+    psllw                m0, 4
+    psllw                m1, 4
+    psllw                m2, 4
+    psllw                m3, 4
+    mova        [tmpq-32*4], m0
+    mova        [tmpq-32*3], m1
+    mova        [tmpq-32*2], m2
+    mova        [tmpq-32*1], m3
+    dec                  hd
+    jg .prep_w128
+    RET
+.h:
+    ; 16 * src[x] + (mx * (src[x + 1] - src[x]))
+    ; = (16 - mx) * src[x] + mx * src[x + 1]
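+    ; e.g. mx = 4, src[x] = 10, src[x+1] = 20: 12*10 + 4*20 = 200;
+    ; unlike put, prep stores this 16x-scaled intermediate without rounding.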
+    imul               mxyd, 0xff01
+    vbroadcasti128       m4, [bilin_h_shuf8]
+    add                mxyd, 16 << 8
+    movd                xm5, mxyd
+    mov                mxyd, r6m ; my
+    vpbroadcastw         m5, xm5
+    test               mxyd, mxyd
+    jnz .hv
+    movzx                wd, word [t2+wq*2+table_offset(prep, _bilin_h)]
+    add                  wq, t2
+    lea            stride3q, [strideq*3]
+    jmp                  wq
+.h_w4:
+    vbroadcasti128       m4, [bilin_h_shuf4]
+.h_w4_loop:
+    movq                xm0, [srcq+strideq*0]
+    movhps              xm0, [srcq+strideq*1]
+    movq                xm1, [srcq+strideq*2]
+    movhps              xm1, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vinserti128          m0, m0, xm1, 1
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+    mova             [tmpq], m0
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .h_w4_loop
+    RET
+.h_w8:
+    movu                xm0,     [srcq+strideq*0]
+    vinserti128          m0, m0, [srcq+strideq*1], 1
+    movu                xm1,     [srcq+strideq*2]
+    vinserti128          m1, m1, [srcq+stride3q ], 1
+    lea                srcq,     [srcq+strideq*4]
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    add                tmpq, 32*2
+    sub                  hd, 4
+    jg .h_w8
+    RET
+.h_w16:
+    movu                xm0,     [srcq+strideq*0+8*0]
+    vinserti128          m0, m0, [srcq+strideq*0+8*1], 1
+    movu                xm1,     [srcq+strideq*1+8*0]
+    vinserti128          m1, m1, [srcq+strideq*1+8*1], 1
+    movu                xm2,     [srcq+strideq*2+8*0]
+    vinserti128          m2, m2, [srcq+strideq*2+8*1], 1
+    movu                xm3,     [srcq+stride3q +8*0]
+    vinserti128          m3, m3, [srcq+stride3q +8*1], 1
+    lea                srcq,     [srcq+strideq*4]
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmaddubsw            m2, m5
+    pmaddubsw            m3, m5
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    add                tmpq, 32*4
+    sub                  hd, 4
+    jg .h_w16
+    RET
+.h_w32:
+    movu                xm0,     [srcq+strideq*0+8*0]
+    vinserti128          m0, m0, [srcq+strideq*0+8*1], 1
+    movu                xm1,     [srcq+strideq*0+8*2]
+    vinserti128          m1, m1, [srcq+strideq*0+8*3], 1
+    movu                xm2,     [srcq+strideq*1+8*0]
+    vinserti128          m2, m2, [srcq+strideq*1+8*1], 1
+    movu                xm3,     [srcq+strideq*1+8*2]
+    vinserti128          m3, m3, [srcq+strideq*1+8*3], 1
+    lea                srcq,     [srcq+strideq*2]
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmaddubsw            m2, m5
+    pmaddubsw            m3, m5
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    add                tmpq, 32*4
+    sub                  hd, 2
+    jg .h_w32
+    RET
+.h_w64:
+    movu                xm0,     [srcq+8*0]
+    vinserti128          m0, m0, [srcq+8*1], 1
+    movu                xm1,     [srcq+8*2]
+    vinserti128          m1, m1, [srcq+8*3], 1
+    movu                xm2,     [srcq+8*4]
+    vinserti128          m2, m2, [srcq+8*5], 1
+    movu                xm3,     [srcq+8*6]
+    vinserti128          m3, m3, [srcq+8*7], 1
+    add                srcq, strideq
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmaddubsw            m2, m5
+    pmaddubsw            m3, m5
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    add                tmpq, 32*4
+    dec                  hd
+    jg .h_w64
+    RET
+.h_w128:
+    movu                xm0,     [srcq+8*0]
+    vinserti128          m0, m0, [srcq+8*1], 1
+    movu                xm1,     [srcq+8*2]
+    vinserti128          m1, m1, [srcq+8*3], 1
+    movu                xm2,     [srcq+8*4]
+    vinserti128          m2, m2, [srcq+8*5], 1
+    movu                xm3,     [srcq+8*6]
+    vinserti128          m3, m3, [srcq+8*7], 1
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmaddubsw            m2, m5
+    pmaddubsw            m3, m5
+    mova        [tmpq+32*0], m0
+    mova        [tmpq+32*1], m1
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m3
+    movu                xm0,     [srcq+8* 8]
+    vinserti128          m0, m0, [srcq+8* 9], 1
+    movu                xm1,     [srcq+8*10]
+    vinserti128          m1, m1, [srcq+8*11], 1
+    movu                xm2,     [srcq+8*12]
+    vinserti128          m2, m2, [srcq+8*13], 1
+    movu                xm3,     [srcq+8*14]
+    vinserti128          m3, m3, [srcq+8*15], 1
+    add                tmpq, 32*8
+    add                srcq, strideq
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pshufb               m3, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+    pmaddubsw            m2, m5
+    pmaddubsw            m3, m5
+    mova        [tmpq-32*4], m0
+    mova        [tmpq-32*3], m1
+    mova        [tmpq-32*2], m2
+    mova        [tmpq-32*1], m3
+    dec                  hd
+    jg .h_w128
+    RET
+.v:
+    WIN64_SPILL_XMM       7
+    movzx                wd, word [t2+wq*2+table_offset(prep, _bilin_v)]
+    imul               mxyd, 0xff01
+    add                mxyd, 16 << 8
+    add                  wq, t2
+    lea            stride3q, [strideq*3]
+    movd                xm6, mxyd
+    vpbroadcastw         m6, xm6
+    jmp                  wq
+.v_w4:
+    movd                xm0, [srcq+strideq*0]
+.v_w4_loop:
+    vpbroadcastd         m1, [srcq+strideq*2]
+    vpbroadcastd        xm2, [srcq+strideq*1]
+    vpbroadcastd         m3, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vpblendd             m1, m1, m0, 0x05 ; 0 2 2 2
+    vpbroadcastd         m0, [srcq+strideq*0]
+    vpblendd             m3, m3, m2, 0x0f ; 1 1 3 3
+    vpblendd             m2, m1, m0, 0xa0 ; 0 2 2 4
+    vpblendd             m1, m1, m3, 0xaa ; 0 1 2 3
+    vpblendd             m2, m2, m3, 0x55 ; 1 2 3 4
+    punpcklbw            m2, m1
+    pmaddubsw            m2, m6
+    mova             [tmpq], m2
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .v_w4_loop
+    RET
+.v_w8:
+    movq                xm0, [srcq+strideq*0]
+.v_w8_loop:
+    vpbroadcastq         m1, [srcq+strideq*2]
+    vpbroadcastq         m2, [srcq+strideq*1]
+    vpbroadcastq         m3, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vpblendd             m1, m1, m0, 0x03 ; 0 2 2 2
+    vpbroadcastq         m0, [srcq+strideq*0]
+    vpblendd             m3, m3, m2, 0x33 ; 1 3 1 3
+    vpblendd             m2, m1, m3, 0x0f ; 1 3 2 2
+    vpblendd             m1, m1, m3, 0xf0 ; 0 2 1 3
+    vpblendd             m2, m2, m0, 0xc0 ; 1 3 2 4
+    punpcklbw            m3, m2, m1
+    punpckhbw            m2, m1
+    pmaddubsw            m3, m6
+    pmaddubsw            m2, m6
+    mova        [tmpq+32*0], m3
+    mova        [tmpq+32*1], m2
+    add                tmpq, 32*2
+    sub                  hd, 4
+    jg .v_w8_loop
+    RET
+.v_w16:
+    vbroadcasti128       m0, [srcq+strideq*0]
+.v_w16_loop:
+    vbroadcasti128       m1, [srcq+strideq*2]
+    vbroadcasti128       m2, [srcq+strideq*1]
+    vbroadcasti128       m3, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    shufpd               m4, m0, m1, 0x0c ; 0 2
+    vbroadcasti128       m0, [srcq+strideq*0]
+    shufpd               m2, m2, m3, 0x0c ; 1 3
+    shufpd               m1, m1, m0, 0x0c ; 2 4
+    punpcklbw            m3, m2, m4
+    punpcklbw            m5, m1, m2
+    punpckhbw            m1, m2
+    punpckhbw            m2, m4
+    pmaddubsw            m3, m6
+    pmaddubsw            m5, m6
+    pmaddubsw            m2, m6
+    pmaddubsw            m1, m6
+    mova        [tmpq+32*0], m3
+    mova        [tmpq+32*1], m5
+    mova        [tmpq+32*2], m2
+    mova        [tmpq+32*3], m1
+    add                tmpq, 32*4
+    sub                  hd, 4
+    jg .v_w16_loop
+    RET
+.v_w32:
+    vpermq               m0, [srcq+strideq*0], q3120
+.v_w32_loop:
+    vpermq               m1, [srcq+strideq*1], q3120
+    vpermq               m2, [srcq+strideq*2], q3120
+    vpermq               m3, [srcq+stride3q ], q3120
+    lea                srcq, [srcq+strideq*4]
+    punpcklbw            m4, m1, m0
+    punpckhbw            m5, m1, m0
+    vpermq               m0, [srcq+strideq*0], q3120
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    mova        [tmpq+32*0], m4
+    mova        [tmpq+32*1], m5
+    punpcklbw            m4, m2, m1
+    punpckhbw            m5, m2, m1
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    mova        [tmpq+32*2], m4
+    mova        [tmpq+32*3], m5
+    add                tmpq, 32*8
+    punpcklbw            m4, m3, m2
+    punpckhbw            m5, m3, m2
+    punpcklbw            m1, m0, m3
+    punpckhbw            m2, m0, m3
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    pmaddubsw            m1, m6
+    pmaddubsw            m2, m6
+    mova        [tmpq-32*4], m4
+    mova        [tmpq-32*3], m5
+    mova        [tmpq-32*2], m1
+    mova        [tmpq-32*1], m2
+    sub                  hd, 4
+    jg .v_w32_loop
+    RET
+.v_w64:
+    vpermq               m0, [srcq+strideq*0+32*0], q3120
+    vpermq               m1, [srcq+strideq*0+32*1], q3120
+.v_w64_loop:
+    vpermq               m2, [srcq+strideq*1+32*0], q3120
+    vpermq               m3, [srcq+strideq*1+32*1], q3120
+    lea                srcq, [srcq+strideq*2]
+    punpcklbw            m4, m2, m0
+    punpckhbw            m5, m2, m0
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    mova        [tmpq+32*0], m4
+    mova        [tmpq+32*1], m5
+    punpcklbw            m4, m3, m1
+    punpckhbw            m5, m3, m1
+    vpermq               m0, [srcq+strideq*0+32*0], q3120
+    vpermq               m1, [srcq+strideq*0+32*1], q3120
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    mova        [tmpq+32*2], m4
+    mova        [tmpq+32*3], m5
+    add                tmpq, 32*8
+    punpcklbw            m4, m0, m2
+    punpckhbw            m5, m0, m2
+    punpcklbw            m2, m1, m3
+    punpckhbw            m3, m1, m3
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    pmaddubsw            m2, m6
+    pmaddubsw            m3, m6
+    mova        [tmpq-32*4], m4
+    mova        [tmpq-32*3], m5
+    mova        [tmpq-32*2], m2
+    mova        [tmpq-32*1], m3
+    sub                  hd, 2
+    jg .v_w64_loop
+    RET
+.v_w128:
+    mov                  t0, tmpq
+    mov                  t1, srcq
+    lea                 t2d, [hq+(3<<8)]
+.v_w128_loop0:
+    vpermq               m0, [srcq+strideq*0], q3120
+.v_w128_loop:
+    vpermq               m1, [srcq+strideq*1], q3120
+    lea                srcq, [srcq+strideq*2]
+    punpcklbw            m2, m1, m0
+    punpckhbw            m3, m1, m0
+    vpermq               m0, [srcq+strideq*0], q3120
+    punpcklbw            m4, m0, m1
+    punpckhbw            m5, m0, m1
+    pmaddubsw            m2, m6
+    pmaddubsw            m3, m6
+    pmaddubsw            m4, m6
+    pmaddubsw            m5, m6
+    mova        [tmpq+32*0], m2
+    mova        [tmpq+32*1], m3
+    mova        [tmpq+32*8], m4
+    mova        [tmpq+32*9], m5
+    add                tmpq, 32*16
+    sub                  hd, 2
+    jg .v_w128_loop
+    mov                  hb, t2b
+    add                  t0, 64
+    add                  t1, 32
+    mov                tmpq, t0
+    mov                srcq, t1
+    sub                 t2d, 1<<8
+    jg .v_w128_loop0
+    RET
+.hv:
+    ; (16 * src[x] + (my * (src[x + src_stride] - src[x])) + 8) >> 4
+    ; = src[x] + (((my * (src[x + src_stride] - src[x])) + 8) >> 4)
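+    ; my is pre-scaled by 2^11 (shl mxyd, 11 below) so a single pmulhrsw on
+    ; the row difference computes ((my * diff) + 8) >> 4 with rounding.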
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM       7
+    movzx                wd, word [t2+wq*2+table_offset(prep, _bilin_hv)]
+    shl                mxyd, 11
+    movd                xm6, mxyd
+    add                  wq, t2
+    lea            stride3q, [strideq*3]
+    vpbroadcastw         m6, xm6
+    jmp                  wq
+.hv_w4:
+    vbroadcasti128       m4, [bilin_h_shuf4]
+    vpbroadcastq         m0, [srcq+strideq*0]
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w4_loop:
+    movq                xm1, [srcq+strideq*1]
+    movhps              xm1, [srcq+strideq*2]
+    movq                xm2, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    movhps              xm2, [srcq+strideq*0]
+    vinserti128          m1, m1, xm2, 1
+    pshufb               m1, m4
+    pmaddubsw            m1, m5        ; 1 2 3 4
+    vpblendd             m2, m1, m0, 0xc0
+    vpermq               m2, m2, q2103 ; 0 1 2 3
+    mova                 m0, m1
+    psubw                m1, m2
+    pmulhrsw             m1, m6
+    paddw                m1, m2
+    mova             [tmpq], m1
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .hv_w4_loop
+    RET
+.hv_w8:
+    vbroadcasti128       m0,     [srcq+strideq*0]
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w8_loop:
+    movu                xm1,     [srcq+strideq*1]
+    vinserti128          m1, m1, [srcq+strideq*2], 1
+    movu                xm2,     [srcq+stride3q ]
+    lea                srcq,     [srcq+strideq*4]
+    vinserti128          m2, m2, [srcq+strideq*0], 1
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pmaddubsw            m1, m5           ; 1 2
+    vperm2i128           m3, m0, m1, 0x21 ; 0 1
+    pmaddubsw            m0, m2, m5       ; 3 4
+    vperm2i128           m2, m1, m0, 0x21 ; 2 3
+    psubw                m1, m3
+    pmulhrsw             m1, m6
+    paddw                m1, m3
+    psubw                m3, m0, m2
+    pmulhrsw             m3, m6
+    paddw                m3, m2
+    mova        [tmpq+32*0], m1
+    mova        [tmpq+32*1], m3
+    add                tmpq, 32*2
+    sub                  hd, 4
+    jg .hv_w8_loop
+    RET
+.hv_w16:
+    movu                 m0,     [srcq+strideq*0+8*0]
+    vinserti128          m0, m0, [srcq+strideq*0+8*1], 1
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w16_loop:
+    movu                xm1,     [srcq+strideq*1+8*0]
+    vinserti128          m1, m1, [srcq+strideq*1+8*1], 1
+    lea                srcq,     [srcq+strideq*2]
+    movu                xm2,     [srcq+strideq*0+8*0]
+    vinserti128          m2, m2, [srcq+strideq*0+8*1], 1
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pmaddubsw            m1, m5
+    psubw                m3, m1, m0
+    pmulhrsw             m3, m6
+    paddw                m3, m0
+    pmaddubsw            m0, m2, m5
+    psubw                m2, m0, m1
+    pmulhrsw             m2, m6
+    paddw                m2, m1
+    mova        [tmpq+32*0], m3
+    mova        [tmpq+32*1], m2
+    add                tmpq, 32*2
+    sub                  hd, 2
+    jg .hv_w16_loop
+    RET
+.hv_w32:
+    movu                 m0,     [srcq+8*0]
+    vinserti128          m0, m0, [srcq+8*1], 1
+    movu                 m1,     [srcq+8*2]
+    vinserti128          m1, m1, [srcq+8*3], 1
+    pshufb               m0, m4
+    pshufb               m1, m4
+    pmaddubsw            m0, m5
+    pmaddubsw            m1, m5
+.hv_w32_loop:
+    add                srcq, strideq
+    movu                xm2,     [srcq+8*0]
+    vinserti128          m2, m2, [srcq+8*1], 1
+    pshufb               m2, m4
+    pmaddubsw            m2, m5
+    psubw                m3, m2, m0
+    pmulhrsw             m3, m6
+    paddw                m3, m0
+    mova                 m0, m2
+    mova          [tmpq+ 0], m3
+    movu                xm2,     [srcq+8*2]
+    vinserti128          m2, m2, [srcq+8*3], 1
+    pshufb               m2, m4
+    pmaddubsw            m2, m5
+    psubw                m3, m2, m1
+    pmulhrsw             m3, m6
+    paddw                m3, m1
+    mova                 m1, m2
+    mova          [tmpq+32], m3
+    add                tmpq, 32*2
+    dec                  hd
+    jg .hv_w32_loop
+    RET
+.hv_w64:
+    mov                  t0, tmpq
+    mov                  t1, srcq
+    lea                 t2d, [hq+(3<<8)]
+.hv_w64_loop0:
+    movu                 m0,     [srcq+strideq*0+8*0]
+    vinserti128          m0, m0, [srcq+strideq*0+8*1], 1
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w64_loop:
+    movu                xm1,     [srcq+strideq*1+8*0]
+    vinserti128          m1, m1, [srcq+strideq*1+8*1], 1
+    lea                srcq,     [srcq+strideq*2]
+    movu                xm2,     [srcq+strideq*0+8*0]
+    vinserti128          m2, m2, [srcq+strideq*0+8*1], 1
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pmaddubsw            m1, m5
+    psubw                m3, m1, m0
+    pmulhrsw             m3, m6
+    paddw                m3, m0
+    pmaddubsw            m0, m2, m5
+    psubw                m2, m0, m1
+    pmulhrsw             m2, m6
+    paddw                m2, m1
+    mova        [tmpq+32*0], m3
+    add                tmpq, 32*8
+    mova        [tmpq-32*4], m2
+    sub                  hd, 2
+    jg .hv_w64_loop
+    mov                  hb, t2b
+    add                  t0, 32
+    add                  t1, 16
+    mov                tmpq, t0
+    mov                srcq, t1
+    sub                 t2d, 1<<8
+    jg .hv_w64_loop0
+    RET
+.hv_w128:
+    mov                  t0, tmpq
+    mov                  t1, srcq
+    lea                 t2d, [hq+(7<<8)]
+.hv_w128_loop0:
+    movu                 m0,     [srcq+strideq*0+8*0]
+    vinserti128          m0, m0, [srcq+strideq*0+8*1], 1
+    pshufb               m0, m4
+    pmaddubsw            m0, m5
+.hv_w128_loop:
+    movu                xm1,     [srcq+strideq*1+8*0]
+    vinserti128          m1, m1, [srcq+strideq*1+8*1], 1
+    lea                srcq,     [srcq+strideq*2]
+    movu                xm2,     [srcq+strideq*0+8*0]
+    vinserti128          m2, m2, [srcq+strideq*0+8*1], 1
+    pshufb               m1, m4
+    pshufb               m2, m4
+    pmaddubsw            m1, m5
+    psubw                m3, m1, m0
+    pmulhrsw             m3, m6
+    paddw                m3, m0
+    pmaddubsw            m0, m2, m5
+    psubw                m2, m0, m1
+    pmulhrsw             m2, m6
+    paddw                m2, m1
+    mova        [tmpq+32*0], m3
+    mova        [tmpq+32*8], m2
+    add                tmpq, 32*16
+    sub                  hd, 2
+    jg .hv_w128_loop
+    mov                  hb, t2b
+    add                  t0, 32
+    add                  t1, 16
+    mov                tmpq, t0
+    mov                srcq, t1
+    sub                 t2d, 1<<8
+    jg .hv_w128_loop0
+    RET
+
+; int8_t subpel_filters[5][15][8]
+%assign FILTER_REGULAR (0*15 << 16) | 3*15
+%assign FILTER_SMOOTH  (1*15 << 16) | 4*15
+%assign FILTER_SHARP   (2*15 << 16) | 3*15
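+; Each FILTER_* constant packs two row offsets into mc_subpel_filters: the high
+; halfword selects the 8-tap filter set (used when the filtered dimension is
+; greater than 4) and the low byte the reduced 4-tap set; mx/my are replicated
+; with imul by 0x010101 so either field can be read back with shr 16 or a
+; low-byte movzx.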
+
+%if WIN64
+DECLARE_REG_TMP 4, 5
+%else
+DECLARE_REG_TMP 7, 8
+%endif
+%macro PUT_8TAP_FN 3 ; type, type_h, type_v
+cglobal put_8tap_%1
+    mov                 t0d, FILTER_%2
+    mov                 t1d, FILTER_%3
+%ifnidn %1, sharp_smooth ; skip the jump in the last filter
+    jmp mangle(private_prefix %+ _put_8tap %+ SUFFIX)
+%endif
+%endmacro
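+; Each PUT_8TAP_FN invocation below creates a thin entry point that loads the
+; (horizontal, vertical) filter-type pair into t0d/t1d and jumps into the
+; shared put_8tap body; sharp_smooth is declared last so it simply falls
+; through instead of jumping.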
+
+PUT_8TAP_FN regular,        REGULAR, REGULAR
+PUT_8TAP_FN regular_sharp,  REGULAR, SHARP
+PUT_8TAP_FN regular_smooth, REGULAR, SMOOTH
+PUT_8TAP_FN smooth_regular, SMOOTH,  REGULAR
+PUT_8TAP_FN smooth,         SMOOTH,  SMOOTH
+PUT_8TAP_FN smooth_sharp,   SMOOTH,  SHARP
+PUT_8TAP_FN sharp_regular,  SHARP,   REGULAR
+PUT_8TAP_FN sharp,          SHARP,   SHARP
+PUT_8TAP_FN sharp_smooth,   SHARP,   SMOOTH
+
+cglobal put_8tap, 4, 9, 0, dst, ds, src, ss, w, h, mx, my, ss3
+    imul                mxd, mxm, 0x010101
+    add                 mxd, t0d ; 8tap_h, mx, 4tap_h
+    imul                myd, mym, 0x010101
+    add                 myd, t1d ; 8tap_v, my, 4tap_v
+    lea                  r8, [put_avx2]
+    movsxd               wq, wm
+    movifnidn            hd, hm
+    test                mxd, 0xf00
+    jnz .h
+    test                myd, 0xf00
+    jnz .v
+    tzcnt                wd, wd
+    movzx                wd, word [r8+wq*2+table_offset(put,)]
+    add                  wq, r8
+    lea                  r6, [ssq*3]
+    lea                  r7, [dsq*3]
+%if WIN64
+    pop                  r8
+%endif
+    jmp                  wq
+.h:
+    test                myd, 0xf00
+    jnz .hv
+    vpbroadcastd         m5, [pw_34] ; 2 + (8 << 2)
+    WIN64_SPILL_XMM      11
+    cmp                  wd, 4
+    jl .h_w2
+    vbroadcasti128       m6, [subpel_h_shufA]
+    je .h_w4
+    tzcnt                wd, wd
+    vbroadcasti128       m7, [subpel_h_shufB]
+    vbroadcasti128       m8, [subpel_h_shufC]
+    shr                 mxd, 16
+    sub                srcq, 3
+    movzx                wd, word [r8+wq*2+table_offset(put, _8tap_h)]
+    vpbroadcastd         m9, [r8+mxq*8+subpel_filters-put_avx2+0]
+    vpbroadcastd        m10, [r8+mxq*8+subpel_filters-put_avx2+4]
+    add                  wq, r8
+    jmp                  wq
+.h_w2:
+    movzx               mxd, mxb
+    dec                srcq
+    mova                xm4, [subpel_h_shuf4]
+    vpbroadcastd        xm3, [r8+mxq*8+subpel_filters-put_avx2+2]
+.h_w2_loop:
+    movq                xm0, [srcq+ssq*0]
+    movhps              xm0, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    pshufb              xm0, xm4
+    pmaddubsw           xm0, xm3
+    phaddw              xm0, xm0
+    paddw               xm0, xm5
+    psraw               xm0, 6
+    packuswb            xm0, xm0
+    pextrw     [dstq+dsq*0], xm0, 0
+    pextrw     [dstq+dsq*1], xm0, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w2_loop
+    RET
+.h_w4:
+    movzx               mxd, mxb
+    dec                srcq
+    vpbroadcastd        xm3, [r8+mxq*8+subpel_filters-put_avx2+2]
+.h_w4_loop:
+    movq                xm0, [srcq+ssq*0]
+    movq                xm1, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    pshufb              xm0, xm6
+    pshufb              xm1, xm6
+    pmaddubsw           xm0, xm3
+    pmaddubsw           xm1, xm3
+    phaddw              xm0, xm1
+    paddw               xm0, xm5
+    psraw               xm0, 6
+    packuswb            xm0, xm0
+    movd       [dstq+dsq*0], xm0
+    pextrd     [dstq+dsq*1], xm0, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w4_loop
+    RET
+.h_w8:
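+; PUT_8TAP_H filters 8 pixels per 128-bit lane: subpel_h_shufA/B/C (m6-m8)
+; build the overlapping 4-byte source windows, m9/m10 hold the first and last
+; four filter taps, and phaddw merges the partial sums before the pw_34 bias
+; and the >> 6 normalization.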
+%macro PUT_8TAP_H 4 ; dst/src, tmp[1-3]
+    pshufb              m%2, m%1, m7
+    pshufb              m%3, m%1, m8
+    pshufb              m%1, m6
+    pmaddubsw           m%4, m%2, m9
+    pmaddubsw           m%2, m10
+    pmaddubsw           m%3, m10
+    pmaddubsw           m%1, m9
+    paddw               m%3, m%4
+    paddw               m%1, m%2
+    phaddw              m%1, m%3
+    paddw               m%1, m5
+    psraw               m%1, 6
+%endmacro
+    movu                xm0,     [srcq+ssq*0]
+    vinserti128          m0, m0, [srcq+ssq*1], 1
+    lea                srcq,     [srcq+ssq*2]
+    PUT_8TAP_H            0, 1, 2, 3
+    vextracti128        xm1, m0, 1
+    packuswb            xm0, xm1
+    movq       [dstq+dsq*0], xm0
+    movhps     [dstq+dsq*1], xm0
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w8
+    RET
+.h_w16:
+    movu                xm0,     [srcq+ssq*0+8*0]
+    vinserti128          m0, m0, [srcq+ssq*1+8*0], 1
+    movu                xm1,     [srcq+ssq*0+8*1]
+    vinserti128          m1, m1, [srcq+ssq*1+8*1], 1
+    PUT_8TAP_H            0, 2, 3, 4
+    lea                srcq, [srcq+ssq*2]
+    PUT_8TAP_H            1, 2, 3, 4
+    packuswb             m0, m1
+    mova         [dstq+dsq*0], xm0
+    vextracti128 [dstq+dsq*1], m0, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .h_w16
+    RET
+.h_w32:
+    xor                 r6d, r6d
+    jmp .h_start
+.h_w64:
+    mov                  r6, -32*1
+    jmp .h_start
+.h_w128:
+    mov                  r6, -32*3
+.h_start:
+    sub                srcq, r6
+    sub                dstq, r6
+    mov                  r4, r6
+.h_loop:
+    movu                 m0, [srcq+r6+8*0]
+    movu                 m1, [srcq+r6+8*1]
+    PUT_8TAP_H            0, 2, 3, 4
+    PUT_8TAP_H            1, 2, 3, 4
+    packuswb             m0, m1
+    mova          [dstq+r6], m0
+    add                  r6, 32
+    jle .h_loop
+    add                srcq, ssq
+    add                dstq, dsq
+    mov                  r6, r4
+    dec                  hd
+    jg .h_loop
+    RET
+.v:
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM      16
+    movzx               mxd, myb
+    shr                 myd, 16
+    cmp                  hd, 4
+    cmovle              myd, mxd
+    tzcnt               r6d, wd
+    movzx               r6d, word [r8+r6*2+table_offset(put, _8tap_v)]
+    vpbroadcastd         m7, [pw_512]
+    lea                 myq, [r8+myq*8+subpel_filters-put_avx2]
+    vpbroadcastw         m8, [myq+0]
+    vpbroadcastw         m9, [myq+2]
+    vpbroadcastw        m10, [myq+4]
+    vpbroadcastw        m11, [myq+6]
+    add                  r6, r8
+    lea                ss3q, [ssq*3]
+    sub                srcq, ss3q
+    jmp                  r6
+.v_w2:
+    movd                xm2, [srcq+ssq*0]
+    pinsrw              xm2, [srcq+ssq*1], 2
+    pinsrw              xm2, [srcq+ssq*2], 4
+    pinsrw              xm2, [srcq+ss3q ], 6 ; 0 1 2 3
+    lea                srcq, [srcq+ssq*4]
+    movd                xm3, [srcq+ssq*0]
+    vpbroadcastd        xm1, [srcq+ssq*1]
+    vpbroadcastd        xm0, [srcq+ssq*2]
+    add                srcq, ss3q
+    vpblendd            xm3, xm3, xm1, 0x02  ; 4 5
+    vpblendd            xm1, xm1, xm0, 0x02  ; 5 6
+    palignr             xm4, xm3, xm2, 4     ; 1 2 3 4
+    punpcklbw           xm3, xm1             ; 45 56
+    punpcklbw           xm1, xm2, xm4        ; 01 12
+    punpckhbw           xm2, xm4             ; 23 34
+.v_w2_loop:
+    pmaddubsw           xm5, xm1, xm8        ; a0 b0
+    mova                xm1, xm2
+    pmaddubsw           xm2, xm9             ; a1 b1
+    paddw               xm5, xm2
+    mova                xm2, xm3
+    pmaddubsw           xm3, xm10            ; a2 b2
+    paddw               xm5, xm3
+    vpbroadcastd        xm4, [srcq+ssq*0]
+    vpblendd            xm3, xm0, xm4, 0x02  ; 6 7
+    vpbroadcastd        xm0, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd            xm4, xm4, xm0, 0x02  ; 7 8
+    punpcklbw           xm3, xm4             ; 67 78
+    pmaddubsw           xm4, xm3, xm11       ; a3 b3
+    paddw               xm5, xm4
+    pmulhrsw            xm5, xm7
+    packuswb            xm5, xm5
+    pextrw     [dstq+dsq*0], xm5, 0
+    pextrw     [dstq+dsq*1], xm5, 2
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w2_loop
+    RET
+.v_w4:
+    movd                xm2, [srcq+ssq*0]
+    pinsrd              xm2, [srcq+ssq*1], 1
+    pinsrd              xm2, [srcq+ssq*2], 2
+    pinsrd              xm2, [srcq+ss3q ], 3 ; 0 1 2 3
+    lea                srcq, [srcq+ssq*4]
+    movd                xm3, [srcq+ssq*0]
+    vpbroadcastd        xm1, [srcq+ssq*1]
+    vpbroadcastd        xm0, [srcq+ssq*2]
+    add                srcq, ss3q
+    vpblendd            xm3, xm3, xm1, 0x02  ; 4 5
+    vpblendd            xm1, xm1, xm0, 0x02  ; 5 6
+    palignr             xm4, xm3, xm2, 4     ; 1 2 3 4
+    punpcklbw           xm3, xm1             ; 45 56
+    punpcklbw           xm1, xm2, xm4        ; 01 12
+    punpckhbw           xm2, xm4             ; 23 34
+.v_w4_loop:
+    pmaddubsw           xm5, xm1, xm8        ; a0 b0
+    mova                xm1, xm2
+    pmaddubsw           xm2, xm9             ; a1 b1
+    paddw               xm5, xm2
+    mova                xm2, xm3
+    pmaddubsw           xm3, xm10            ; a2 b2
+    paddw               xm5, xm3
+    vpbroadcastd        xm4, [srcq+ssq*0]
+    vpblendd            xm3, xm0, xm4, 0x02  ; 6 7
+    vpbroadcastd        xm0, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd            xm4, xm4, xm0, 0x02  ; 7 8
+    punpcklbw           xm3, xm4             ; 67 78
+    pmaddubsw           xm4, xm3, xm11       ; a3 b3
+    paddw               xm5, xm4
+    pmulhrsw            xm5, xm7
+    packuswb            xm5, xm5
+    movd       [dstq+dsq*0], xm5
+    pextrd     [dstq+dsq*1], xm5, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w4_loop
+    RET
+.v_w8:
+    movq                xm1, [srcq+ssq*0]
+    vpbroadcastq         m4, [srcq+ssq*1]
+    vpbroadcastq         m2, [srcq+ssq*2]
+    vpbroadcastq         m5, [srcq+ss3q ]
+    lea                srcq, [srcq+ssq*4]
+    vpbroadcastq         m3, [srcq+ssq*0]
+    vpbroadcastq         m6, [srcq+ssq*1]
+    vpbroadcastq         m0, [srcq+ssq*2]
+    add                srcq, ss3q
+    vpblendd             m1, m1, m4, 0x30
+    vpblendd             m4, m4, m2, 0x30
+    punpcklbw            m1, m4 ; 01 12
+    vpblendd             m2, m2, m5, 0x30
+    vpblendd             m5, m5, m3, 0x30
+    punpcklbw            m2, m5 ; 23 34
+    vpblendd             m3, m3, m6, 0x30
+    vpblendd             m6, m6, m0, 0x30
+    punpcklbw            m3, m6 ; 45 56
+.v_w8_loop:
+    pmaddubsw            m5, m1, m8  ; a0 b0
+    mova                 m1, m2
+    pmaddubsw            m2, m9      ; a1 b1
+    paddw                m5, m2
+    mova                 m2, m3
+    pmaddubsw            m3, m10     ; a2 b2
+    paddw                m5, m3
+    vpbroadcastq         m4, [srcq+ssq*0]
+    vpblendd             m3, m0, m4, 0x30
+    vpbroadcastq         m0, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd             m4, m4, m0, 0x30
+    punpcklbw            m3, m4      ; 67 78
+    pmaddubsw            m4, m3, m11 ; a3 b3
+    paddw                m5, m4
+    pmulhrsw             m5, m7
+    vextracti128        xm4, m5, 1
+    packuswb            xm5, xm4
+    movq       [dstq+dsq*0], xm5
+    movhps     [dstq+dsq*1], xm5
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w8_loop
+    RET
+.v_w16:
+.v_w32:
+.v_w64:
+.v_w128:
+    lea                 r6d, [wq-16]
+    mov                  r4, dstq
+    mov                  r7, srcq
+    shl                 r6d, 4
+    mov                 r6b, hb ; r6d = ((w/16-1)<<8) | h: columns left in the high bits, h in the low byte
+.v_w16_loop0:
+    vbroadcasti128       m4, [srcq+ssq*0]
+    vbroadcasti128       m5, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vbroadcasti128       m0, [srcq+ssq*1]
+    vbroadcasti128       m6, [srcq+ssq*0]
+    lea                srcq, [srcq+ssq*2]
+    vbroadcasti128       m1, [srcq+ssq*0]
+    vbroadcasti128       m2, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vbroadcasti128       m3, [srcq+ssq*0]
+    shufpd               m4, m4, m0, 0x0c
+    shufpd               m5, m5, m1, 0x0c
+    punpcklbw            m1, m4, m5 ; 01
+    punpckhbw            m4, m5     ; 34
+    shufpd               m6, m6, m2, 0x0c
+    punpcklbw            m2, m5, m6 ; 12
+    punpckhbw            m5, m6     ; 45
+    shufpd               m0, m0, m3, 0x0c
+    punpcklbw            m3, m6, m0 ; 23
+    punpckhbw            m6, m0     ; 56
+.v_w16_loop:
+    vbroadcasti128      m12, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vbroadcasti128      m13, [srcq+ssq*0]
+    pmaddubsw           m14, m1, m8  ; a0
+    pmaddubsw           m15, m2, m8  ; b0
+    mova                 m1, m3
+    mova                 m2, m4
+    pmaddubsw            m3, m9      ; a1
+    pmaddubsw            m4, m9      ; b1
+    paddw               m14, m3
+    paddw               m15, m4
+    mova                 m3, m5
+    mova                 m4, m6
+    pmaddubsw            m5, m10     ; a2
+    pmaddubsw            m6, m10     ; b2
+    paddw               m14, m5
+    paddw               m15, m6
+    shufpd               m6, m0, m12, 0x0d
+    shufpd               m0, m12, m13, 0x0c
+    punpcklbw            m5, m6, m0  ; 67
+    punpckhbw            m6, m0      ; 78
+    pmaddubsw           m12, m5, m11 ; a3
+    pmaddubsw           m13, m6, m11 ; b3
+    paddw               m14, m12
+    paddw               m15, m13
+    pmulhrsw            m14, m7
+    pmulhrsw            m15, m7
+    packuswb            m14, m15
+    vpermq              m14, m14, q3120
+    mova         [dstq+dsq*0], xm14
+    vextracti128 [dstq+dsq*1], m14, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .v_w16_loop
+    mov                  hb, r6b
+    add                  r4, 16
+    add                  r7, 16
+    mov                dstq, r4
+    mov                srcq, r7
+    sub                 r6d, 1<<8
+    jg .v_w16_loop0
+    RET
+.hv:
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM      16
+    cmp                  wd, 4
+    jg .hv_w8
+    movzx               mxd, mxb
+    dec                srcq
+    vpbroadcastd         m7, [r8+mxq*8+subpel_filters-put_avx2+2]
+    movzx               mxd, myb
+    shr                 myd, 16
+    cmp                  hd, 4
+    cmovle              myd, mxd
+    vpbroadcastq         m0, [r8+myq*8+subpel_filters-put_avx2]
+    lea                ss3q, [ssq*3]
+    sub                srcq, ss3q
+    punpcklbw            m0, m0
+    psraw                m0, 8 ; sign-extend
+    vpbroadcastd         m8, [pw_8192]
+    vpbroadcastd         m9, [pd_512]
+    pshufd              m10, m0, q0000
+    pshufd              m11, m0, q1111
+    pshufd              m12, m0, q2222
+    pshufd              m13, m0, q3333
+    cmp                  wd, 4
+    je .hv_w4
+    vbroadcasti128       m6, [subpel_h_shuf4]
+    movq                xm2, [srcq+ssq*0]
+    movhps              xm2, [srcq+ssq*1]
+    movq                xm0, [srcq+ssq*2]
+    movhps              xm0, [srcq+ss3q ]
+    lea                srcq, [srcq+ssq*4]
+    vpbroadcastq         m3, [srcq+ssq*0]
+    vpbroadcastq         m4, [srcq+ssq*1]
+    vpbroadcastq         m1, [srcq+ssq*2]
+    add                srcq, ss3q
+    vpblendd             m2, m2, m3, 0x30
+    vpblendd             m0, m0, m1, 0x30
+    vpblendd             m2, m2, m4, 0xc0
+    pshufb               m2, m6
+    pshufb               m0, m6
+    pmaddubsw            m2, m7
+    pmaddubsw            m0, m7
+    phaddw               m2, m0
+    pmulhrsw             m2, m8
+    vextracti128        xm3, m2, 1
+    palignr             xm4, xm3, xm2, 4
+    punpcklwd           xm1, xm2, xm4  ; 01 12
+    punpckhwd           xm2, xm4       ; 23 34
+    pshufd              xm0, xm3, q2121
+    punpcklwd           xm3, xm0       ; 45 56
+.hv_w2_loop:
+    pmaddwd             xm5, xm1, xm10 ; a0 b0
+    mova                xm1, xm2
+    pmaddwd             xm2, xm11      ; a1 b1
+    paddd               xm5, xm2
+    mova                xm2, xm3
+    pmaddwd             xm3, xm12      ; a2 b2
+    paddd               xm5, xm3
+    movq                xm4, [srcq+ssq*0]
+    movhps              xm4, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    pshufb              xm4, xm6
+    pmaddubsw           xm4, xm7
+    phaddw              xm4, xm4
+    pmulhrsw            xm4, xm8
+    palignr             xm3, xm4, xm0, 12
+    mova                xm0, xm4
+    punpcklwd           xm3, xm0       ; 67 78
+    pmaddwd             xm4, xm3, xm13 ; a3 b3
+    paddd               xm5, xm9
+    paddd               xm5, xm4
+    psrad               xm5, 10
+    packssdw            xm5, xm5
+    packuswb            xm5, xm5
+    pextrw     [dstq+dsq*0], xm5, 0
+    pextrw     [dstq+dsq*1], xm5, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w2_loop
+    RET
+.hv_w4:
+    mova                 m6, [subpel_h_shuf4]
+    vpbroadcastq         m2, [srcq+ssq*0]
+    vpbroadcastq         m4, [srcq+ssq*1]
+    vpbroadcastq         m0, [srcq+ssq*2]
+    vpbroadcastq         m5, [srcq+ss3q ]
+    lea                srcq, [srcq+ssq*4]
+    vpbroadcastq         m3, [srcq+ssq*0]
+    vpblendd             m2, m2, m4, 0xcc ; 0 1
+    vpbroadcastq         m4, [srcq+ssq*1]
+    vpbroadcastq         m1, [srcq+ssq*2]
+    add                srcq, ss3q
+    vpblendd             m0, m0, m5, 0xcc ; 2 3
+    vpblendd             m3, m3, m4, 0xcc ; 4 5
+    pshufb               m2, m6
+    pshufb               m0, m6
+    pshufb               m3, m6
+    pshufb               m1, m6
+    pmaddubsw            m2, m7
+    pmaddubsw            m0, m7
+    pmaddubsw            m3, m7
+    pmaddubsw            m1, m7
+    phaddw               m2, m0
+    phaddw               m3, m1
+    pmulhrsw             m2, m8
+    pmulhrsw             m3, m8
+    palignr              m4, m3, m2, 4
+    punpcklwd            m1, m2, m4  ; 01 12
+    punpckhwd            m2, m4      ; 23 34
+    pshufd               m0, m3, q2121
+    punpcklwd            m3, m0      ; 45 56
+.hv_w4_loop:
+    pmaddwd              m5, m1, m10 ; a0 b0
+    mova                 m1, m2
+    pmaddwd              m2, m11     ; a1 b1
+    paddd                m5, m2
+    mova                 m2, m3
+    pmaddwd              m3, m12     ; a2 b2
+    paddd                m5, m3
+    vpbroadcastq         m4, [srcq+ssq*0]
+    vpbroadcastq         m3, [srcq+ssq*1]
+    lea                srcq, [srcq+ssq*2]
+    vpblendd             m4, m4, m3, 0xcc ; 7 8
+    pshufb               m4, m6
+    pmaddubsw            m4, m7
+    phaddw               m4, m4
+    pmulhrsw             m4, m8
+    palignr              m3, m4, m0, 12
+    mova                 m0, m4
+    punpcklwd            m3, m0      ; 67 78
+    pmaddwd              m4, m3, m13 ; a3 b3
+    paddd                m5, m9
+    paddd                m5, m4
+    psrad                m5, 10
+    vextracti128        xm4, m5, 1
+    packssdw            xm5, xm4
+    packuswb            xm5, xm5
+    pshuflw             xm5, xm5, q3120
+    movd       [dstq+dsq*0], xm5
+    pextrd     [dstq+dsq*1], xm5, 1
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w4_loop
+    RET
+.hv_w8:
+    shr                 mxd, 16
+    sub                srcq, 3
+    vpbroadcastd        m10, [r8+mxq*8+subpel_filters-put_avx2+0]
+    vpbroadcastd        m11, [r8+mxq*8+subpel_filters-put_avx2+4]
+    movzx               mxd, myb
+    shr                 myd, 16
+    cmp                  hd, 4
+    cmovle              myd, mxd
+    vpbroadcastq         m0, [r8+myq*8+subpel_filters-put_avx2]
+    lea                ss3q, [ssq*3]
+    sub                srcq, ss3q
+    punpcklbw            m0, m0
+    psraw                m0, 8 ; sign-extend
+    pshufd              m12, m0, q0000
+    pshufd              m13, m0, q1111
+    pshufd              m14, m0, q2222
+    pshufd              m15, m0, q3333
+    lea                 r6d, [wq-8]
+    mov                  r4, dstq
+    mov                  r7, srcq
+    shl                 r6d, 5
+    mov                 r6b, hb
+.hv_w8_loop0:
+    vbroadcasti128       m7, [subpel_h_shufA]
+    vbroadcasti128       m8, [subpel_h_shufB]
+    vbroadcasti128       m9, [subpel_h_shufC]
+    movu                xm4,     [srcq+ssq*0]
+    movu                xm5,     [srcq+ssq*1]
+    lea                srcq,     [srcq+ssq*2]
+    movu                xm6,     [srcq+ssq*0]
+    vbroadcasti128       m0,     [srcq+ssq*1]
+    lea                srcq,     [srcq+ssq*2]
+    vpblendd             m4, m4, m0, 0xf0        ; 0 3
+    vinserti128          m5, m5, [srcq+ssq*0], 1 ; 1 4
+    vinserti128          m6, m6, [srcq+ssq*1], 1 ; 2 5
+    lea                srcq,     [srcq+ssq*2]
+    vinserti128          m0, m0, [srcq+ssq*0], 1 ; 3 6
+%macro HV_H_W8 4-7 ; src/dst, tmp[1-3], shuf[1-3]
+    pshufb               %3, %1, %6
+    pshufb               %4, %1, %7
+    pshufb               %1, %5
+    pmaddubsw            %2, %3, m10
+    pmaddubsw            %4, m11
+    pmaddubsw            %3, m11
+    pmaddubsw            %1, m10
+    paddw                %2, %4
+    paddw                %1, %3
+    phaddw               %1, %2
+%endmacro
+    HV_H_W8              m4, m1, m2, m3, m7, m8, m9
+    HV_H_W8              m5, m1, m2, m3, m7, m8, m9
+    HV_H_W8              m6, m1, m2, m3, m7, m8, m9
+    HV_H_W8              m0, m1, m2, m3, m7, m8, m9
+    vpbroadcastd         m7, [pw_8192]
+    vpermq               m4, m4, q3120
+    vpermq               m5, m5, q3120
+    vpermq               m6, m6, q3120
+    pmulhrsw             m0, m7
+    pmulhrsw             m4, m7
+    pmulhrsw             m5, m7
+    pmulhrsw             m6, m7
+    vpermq               m7, m0, q3120
+    punpcklwd            m1, m4, m5  ; 01
+    punpckhwd            m4, m5      ; 34
+    punpcklwd            m2, m5, m6  ; 12
+    punpckhwd            m5, m6      ; 45
+    punpcklwd            m3, m6, m7  ; 23
+    punpckhwd            m6, m7      ; 56
+.hv_w8_loop:
+    vextracti128        r6m, m0, 1 ; not enough registers
+    movu                xm0,     [srcq+ssq*1]
+    lea                srcq,     [srcq+ssq*2]
+    vinserti128          m0, m0, [srcq+ssq*0], 1 ; 7 8
+    pmaddwd              m8, m1, m12 ; a0
+    pmaddwd              m9, m2, m12 ; b0
+    mova                 m1, m3
+    mova                 m2, m4
+    pmaddwd              m3, m13     ; a1
+    pmaddwd              m4, m13     ; b1
+    paddd                m8, m3
+    paddd                m9, m4
+    mova                 m3, m5
+    mova                 m4, m6
+    pmaddwd              m5, m14     ; a2
+    pmaddwd              m6, m14     ; b2
+    paddd                m8, m5
+    paddd                m9, m6
+    vbroadcasti128       m6, [subpel_h_shufB]
+    vbroadcasti128       m7, [subpel_h_shufC]
+    vbroadcasti128       m5, [subpel_h_shufA]
+    HV_H_W8              m0, m5, m6, m7, m5, m6, m7
+    vpbroadcastd         m5, [pw_8192]
+    vpbroadcastd         m7, [pd_512]
+    vbroadcasti128       m6, r6m
+    pmulhrsw             m0, m5
+    paddd                m8, m7
+    paddd                m9, m7
+    vpermq               m7, m0, q3120    ; 7 8
+    shufpd               m6, m6, m7, 0x04 ; 6 7
+    punpcklwd            m5, m6, m7  ; 67
+    punpckhwd            m6, m7      ; 78
+    pmaddwd              m7, m5, m15 ; a3
+    paddd                m8, m7
+    pmaddwd              m7, m6, m15 ; b3
+    paddd                m7, m9
+    psrad                m8, 10
+    psrad                m7, 10
+    packssdw             m8, m7
+    vextracti128        xm7, m8, 1
+    packuswb            xm8, xm7
+    pshufd              xm7, xm8, q3120
+    movq       [dstq+dsq*0], xm7
+    movhps     [dstq+dsq*1], xm7
+    lea                dstq, [dstq+dsq*2]
+    sub                  hd, 2
+    jg .hv_w8_loop
+    mov                  hb, r6b
+    add                  r4, 8
+    add                  r7, 8
+    mov                dstq, r4
+    mov                srcq, r7
+    sub                 r6d, 1<<8
+    jg .hv_w8_loop0
+    RET
+
+%if WIN64
+DECLARE_REG_TMP 6, 4
+%else
+DECLARE_REG_TMP 6, 7
+%endif
+%macro PREP_8TAP_FN 3 ; type, type_h, type_v
+cglobal prep_8tap_%1
+    mov                 t0d, FILTER_%2
+    mov                 t1d, FILTER_%3
+%ifnidn %1, sharp_smooth ; skip the jump in the last filter
+    jmp mangle(private_prefix %+ _prep_8tap %+ SUFFIX)
+%endif
+%endmacro
+
+PREP_8TAP_FN regular,        REGULAR, REGULAR
+PREP_8TAP_FN regular_sharp,  REGULAR, SHARP
+PREP_8TAP_FN regular_smooth, REGULAR, SMOOTH
+PREP_8TAP_FN smooth_regular, SMOOTH,  REGULAR
+PREP_8TAP_FN smooth,         SMOOTH,  SMOOTH
+PREP_8TAP_FN smooth_sharp,   SMOOTH,  SHARP
+PREP_8TAP_FN sharp_regular,  SHARP,   REGULAR
+PREP_8TAP_FN sharp,          SHARP,   SHARP
+PREP_8TAP_FN sharp_smooth,   SHARP,   SMOOTH
+
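+; prep_8tap mirrors put_8tap but, instead of packing to bytes, stores 16-bit
+; intermediates (4 bits of extra precision, cf. the (tmp1 + tmp2 + 16) >> 5
+; rounding in avg below) to tmp for the compound functions.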
+cglobal prep_8tap, 3, 8, 0, tmp, src, stride, w, h, mx, my, stride3
+    imul                mxd, mxm, 0x010101
+    add                 mxd, t0d ; 8tap_h, mx, 4tap_h
+    imul                myd, mym, 0x010101
+    add                 myd, t1d ; 8tap_v, my, 4tap_v
+    lea                  r7, [prep_avx2]
+    movsxd               wq, wm
+    movifnidn            hd, hm
+    test                mxd, 0xf00
+    jnz .h
+    test                myd, 0xf00
+    jnz .v
+    tzcnt                wd, wd
+    movzx                wd, word [r7+wq*2+table_offset(prep,)]
+    add                  wq, r7
+    lea                  r6, [strideq*3]
+%if WIN64
+    pop                  r7
+%endif
+    jmp                  wq
+.h:
+    test                myd, 0xf00
+    jnz .hv
+    vbroadcasti128       m5, [subpel_h_shufA]
+    vpbroadcastd         m4, [pw_8192]
+    WIN64_SPILL_XMM      10
+    cmp                  wd, 4
+    je .h_w4
+    tzcnt                wd, wd
+    vbroadcasti128       m6, [subpel_h_shufB]
+    vbroadcasti128       m7, [subpel_h_shufC]
+    shr                 mxd, 16
+    sub                srcq, 3
+    movzx                wd, word [r7+wq*2+table_offset(prep, _8tap_h)]
+    vpbroadcastd         m8, [r7+mxq*8+subpel_filters-prep_avx2+0]
+    vpbroadcastd         m9, [r7+mxq*8+subpel_filters-prep_avx2+4]
+    add                  wq, r7
+    jmp                  wq
+.h_w4:
+    movzx               mxd, mxb
+    dec                srcq
+    vpbroadcastd         m3, [r7+mxq*8+subpel_filters-prep_avx2+2]
+    lea            stride3q, [strideq*3]
+.h_w4_loop:
+    movq                xm0, [srcq+strideq*0]
+    vpbroadcastq         m2, [srcq+strideq*2]
+    movq                xm1, [srcq+strideq*1]
+    vpblendd             m0, m0, m2, 0xf0
+    vpbroadcastq         m2, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vpblendd             m1, m1, m2, 0xf0
+    pshufb               m0, m5
+    pshufb               m1, m5
+    pmaddubsw            m0, m3
+    pmaddubsw            m1, m3
+    phaddw               m0, m1
+    pmulhrsw             m0, m4
+    mova             [tmpq], m0
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .h_w4_loop
+    RET
+.h_w8:
+%macro PREP_8TAP_H 0
+    pshufb               m1, m0, m6
+    pshufb               m2, m0, m7
+    pshufb               m0, m5
+    pmaddubsw            m3, m1, m8
+    pmaddubsw            m1, m9
+    pmaddubsw            m2, m9
+    pmaddubsw            m0, m8
+    paddw                m2, m3
+    paddw                m0, m1
+    phaddw               m0, m2
+    pmulhrsw             m0, m4
+%endmacro
+    movu                xm0,     [srcq+strideq*0]
+    vinserti128          m0, m0, [srcq+strideq*1], 1
+    lea                srcq,     [srcq+strideq*2]
+    PREP_8TAP_H
+    mova             [tmpq], m0
+    add                tmpq, 32
+    sub                  hd, 2
+    jg .h_w8
+    RET
+.h_w16:
+    movu                xm0,     [srcq+strideq*0+8*0]
+    vinserti128          m0, m0, [srcq+strideq*0+8*1], 1
+    PREP_8TAP_H
+    mova        [tmpq+32*0], m0
+    movu                xm0,     [srcq+strideq*1+8*0]
+    vinserti128          m0, m0, [srcq+strideq*1+8*1], 1
+    lea                srcq, [srcq+strideq*2]
+    PREP_8TAP_H
+    mova        [tmpq+32*1], m0
+    add                tmpq, 64
+    sub                  hd, 2
+    jg .h_w16
+    RET
+.h_w32:
+    xor                 r6d, r6d
+    jmp .h_start
+.h_w64:
+    mov                  r6, -32*1
+    jmp .h_start
+.h_w128:
+    mov                  r6, -32*3
+.h_start:
+    sub                srcq, r6
+    mov                  r5, r6
+.h_loop:
+    movu                xm0,     [srcq+r6+8*0]
+    vinserti128          m0, m0, [srcq+r6+8*1], 1
+    PREP_8TAP_H
+    mova        [tmpq+32*0], m0
+    movu                xm0,     [srcq+r6+8*2]
+    vinserti128          m0, m0, [srcq+r6+8*3], 1
+    PREP_8TAP_H
+    mova        [tmpq+32*1], m0
+    add                tmpq, 64
+    add                  r6, 32
+    jle .h_loop
+    add                srcq, strideq
+    mov                  r6, r5
+    dec                  hd
+    jg .h_loop
+    RET
+.v:
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM      16
+    movzx               mxd, myb ; Select 4-tap/8-tap filter multipliers.
+    shr                 myd, 16  ; Note that the code is 8-tap only, having
+    cmp                  hd, 4   ; a separate 4-tap code path for (4|8|16)x4
+    cmove               myd, mxd ; had a negligible effect on performance.
+    ; TODO: Would a 6-tap code path be worth it?
+    vpbroadcastd         m7, [pw_8192]
+    lea                 myq, [r7+myq*8+subpel_filters-prep_avx2]
+    vpbroadcastw         m8, [myq+0]
+    vpbroadcastw         m9, [myq+2]
+    vpbroadcastw        m10, [myq+4]
+    vpbroadcastw        m11, [myq+6]
+    lea            stride3q, [strideq*3]
+    sub                srcq, stride3q
+    cmp                  wd, 8
+    jg .v_w16
+    je .v_w8
+.v_w4:
+    movd                xm0, [srcq+strideq*0]
+    vpbroadcastd         m1, [srcq+strideq*2]
+    vpbroadcastd        xm2, [srcq+strideq*1]
+    vpbroadcastd         m3, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vpblendd             m1, m1, m0, 0x01 ; 0 2 2 _   2 _ _ _
+    vpblendd             m3, m3, m2, 0x03 ; 1 1 3 3   3 3 _ _
+    vpbroadcastd         m0, [srcq+strideq*0]
+    vpbroadcastd         m2, [srcq+strideq*1]
+    vpblendd             m1, m1, m0, 0x68 ; 0 2 2 4   2 4 4 _
+    vpbroadcastd         m0, [srcq+strideq*2]
+    vbroadcasti128       m6, [deint_shuf4]
+    vpblendd             m3, m3, m2, 0xc0 ; 1 1 3 3   3 3 5 5
+    vpblendd             m2, m3, m1, 0x55 ; 0 1 2 3   2 3 4 5
+    vpblendd             m3, m3, m1, 0xaa ; 1 2 3 4   3 4 5 _
+    punpcklbw            m1, m2, m3       ; 01  12    23  34
+    vpblendd             m3, m3, m0, 0x80 ; 1 2 3 4   3 4 5 6
+    punpckhbw            m2, m3           ; 23  34    45  56
+.v_w4_loop:
+    pinsrd              xm0, [srcq+stride3q ], 1
+    lea                srcq, [srcq+strideq*4]
+    vpbroadcastd         m3, [srcq+strideq*0]
+    vpbroadcastd         m4, [srcq+strideq*1]
+    vpblendd             m3, m3, m4, 0x20 ; _ _ 8 _   8 9 _ _
+    vpblendd             m3, m3, m0, 0x03 ; 6 7 8 _   8 9 _ _
+    vpbroadcastd         m0, [srcq+strideq*2]
+    vpblendd             m3, m3, m0, 0x40 ; 6 7 8 _   8 9 a _
+    pshufb               m3, m6           ; 67  78    89  9a
+    pmaddubsw            m4, m1, m8
+    vperm2i128           m1, m2, m3, 0x21 ; 45  56    67  78
+    pmaddubsw            m2, m9
+    paddw                m4, m2
+    mova                 m2, m3
+    pmaddubsw            m3, m11
+    paddw                m3, m4
+    pmaddubsw            m4, m1, m10
+    paddw                m3, m4
+    pmulhrsw             m3, m7
+    mova             [tmpq], m3
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .v_w4_loop
+    RET
+.v_w8:
+    movq                xm1, [srcq+strideq*0]
+    vpbroadcastq         m4, [srcq+strideq*1]
+    vpbroadcastq         m2, [srcq+strideq*2]
+    vpbroadcastq         m5, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vpbroadcastq         m3, [srcq+strideq*0]
+    vpbroadcastq         m6, [srcq+strideq*1]
+    vpbroadcastq         m0, [srcq+strideq*2]
+    vpblendd             m1, m1, m4, 0x30
+    vpblendd             m4, m4, m2, 0x30
+    punpcklbw            m1, m4 ; 01 12
+    vpblendd             m2, m2, m5, 0x30
+    vpblendd             m5, m5, m3, 0x30
+    punpcklbw            m2, m5 ; 23 34
+    vpblendd             m3, m3, m6, 0x30
+    vpblendd             m6, m6, m0, 0x30
+    punpcklbw            m3, m6 ; 45 56
+.v_w8_loop:
+    vpbroadcastq         m4, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    pmaddubsw            m5, m2, m9  ; a1
+    pmaddubsw            m6, m2, m8  ; b0
+    vpblendd             m2, m0, m4, 0x30
+    vpbroadcastq         m0, [srcq+strideq*0]
+    vpblendd             m4, m4, m0, 0x30
+    punpcklbw            m2, m4      ; 67 78
+    pmaddubsw            m1, m8      ; a0
+    pmaddubsw            m4, m3, m9  ; b1
+    paddw                m5, m1
+    mova                 m1, m3
+    pmaddubsw            m3, m10     ; a2
+    paddw                m6, m4
+    paddw                m5, m3
+    vpbroadcastq         m4, [srcq+strideq*1]
+    vpblendd             m3, m0, m4, 0x30
+    vpbroadcastq         m0, [srcq+strideq*2]
+    vpblendd             m4, m4, m0, 0x30
+    punpcklbw            m3, m4      ; 89 9a
+    pmaddubsw            m4, m2, m11 ; a3
+    paddw                m5, m4
+    pmaddubsw            m4, m2, m10 ; b2
+    paddw                m6, m4
+    pmaddubsw            m4, m3, m11 ; b3
+    paddw                m6, m4
+    pmulhrsw             m5, m7
+    pmulhrsw             m6, m7
+    mova        [tmpq+32*0], m5
+    mova        [tmpq+32*1], m6
+    add                tmpq, 32*2
+    sub                  hd, 4
+    jg .v_w8_loop
+    RET
+.v_w16:
+    lea                 r6d, [wq-16]
+    mov                  r5, tmpq
+    mov                  r7, srcq
+    shl                 r6d, 4
+    mov                 r6b, hb
+.v_w16_loop0:
+    vbroadcasti128       m4, [srcq+strideq*0]
+    vbroadcasti128       m5, [srcq+strideq*1]
+    lea                srcq, [srcq+strideq*2]
+    vbroadcasti128       m0, [srcq+strideq*1]
+    vbroadcasti128       m6, [srcq+strideq*0]
+    lea                srcq, [srcq+strideq*2]
+    vbroadcasti128       m1, [srcq+strideq*0]
+    vbroadcasti128       m2, [srcq+strideq*1]
+    lea                srcq, [srcq+strideq*2]
+    vbroadcasti128       m3, [srcq+strideq*0]
+    shufpd               m4, m4, m0, 0x0c
+    shufpd               m5, m5, m1, 0x0c
+    punpcklbw            m1, m4, m5 ; 01
+    punpckhbw            m4, m5     ; 34
+    shufpd               m6, m6, m2, 0x0c
+    punpcklbw            m2, m5, m6 ; 12
+    punpckhbw            m5, m6     ; 45
+    shufpd               m0, m0, m3, 0x0c
+    punpcklbw            m3, m6, m0 ; 23
+    punpckhbw            m6, m0     ; 56
+.v_w16_loop:
+    vbroadcasti128      m12, [srcq+strideq*1]
+    lea                srcq, [srcq+strideq*2]
+    vbroadcasti128      m13, [srcq+strideq*0]
+    pmaddubsw           m14, m1, m8  ; a0
+    pmaddubsw           m15, m2, m8  ; b0
+    mova                 m1, m3
+    mova                 m2, m4
+    pmaddubsw            m3, m9      ; a1
+    pmaddubsw            m4, m9      ; b1
+    paddw               m14, m3
+    paddw               m15, m4
+    mova                 m3, m5
+    mova                 m4, m6
+    pmaddubsw            m5, m10     ; a2
+    pmaddubsw            m6, m10     ; b2
+    paddw               m14, m5
+    paddw               m15, m6
+    shufpd               m6, m0, m12, 0x0d
+    shufpd               m0, m12, m13, 0x0c
+    punpcklbw            m5, m6, m0  ; 67
+    punpckhbw            m6, m0      ; 78
+    pmaddubsw           m12, m5, m11 ; a3
+    pmaddubsw           m13, m6, m11 ; b3
+    paddw               m14, m12
+    paddw               m15, m13
+    pmulhrsw            m14, m7
+    pmulhrsw            m15, m7
+    mova        [tmpq+wq*0], m14
+    mova        [tmpq+wq*2], m15
+    lea                tmpq, [tmpq+wq*4]
+    sub                  hd, 2
+    jg .v_w16_loop
+    mov                  hb, r6b
+    add                  r5, 32
+    add                  r7, 16
+    mov                tmpq, r5
+    mov                srcq, r7
+    sub                 r6d, 1<<8
+    jg .v_w16_loop0
+    RET
+.hv:
+    %assign stack_offset stack_offset - stack_size_padded
+    WIN64_SPILL_XMM      16
+    cmp                  wd, 4
+    jg .hv_w8
+    movzx               mxd, mxb
+    dec                srcq
+    mova                 m7, [subpel_h_shuf4]
+    vpbroadcastd         m8, [r7+mxq*8+subpel_filters-prep_avx2+2]
+    pmovzxbd             m9, [deint_shuf4]
+    movzx               mxd, myb
+    shr                 myd, 16
+    cmp                  hd, 4
+    cmove               myd, mxd
+    vpbroadcastq         m0, [r7+myq*8+subpel_filters-prep_avx2]
+    lea            stride3q, [strideq*3]
+    sub                srcq, stride3q
+    punpcklbw            m0, m0
+    psraw                m0, 8 ; sign-extend
+    vpbroadcastd        m10, [pw_8192]
+    vpbroadcastd        m11, [pd_32]
+    pshufd              m12, m0, q0000
+    pshufd              m13, m0, q1111
+    pshufd              m14, m0, q2222
+    pshufd              m15, m0, q3333
+    vpbroadcastq         m2, [srcq+strideq*0]
+    vpbroadcastq         m4, [srcq+strideq*1]
+    vpbroadcastq         m0, [srcq+strideq*2]
+    vpbroadcastq         m5, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    vpbroadcastq         m3, [srcq+strideq*0]
+    vpbroadcastq         m6, [srcq+strideq*1]
+    vpbroadcastq         m1, [srcq+strideq*2]
+    vpblendd             m2, m2, m4, 0xcc ; 0 1
+    vpblendd             m0, m0, m5, 0xcc ; 2 3
+    vpblendd             m3, m3, m6, 0xcc ; 4 5
+    pshufb               m2, m7
+    pshufb               m0, m7
+    pshufb               m3, m7
+    pshufb               m1, m7
+    pmaddubsw            m2, m8
+    pmaddubsw            m0, m8
+    pmaddubsw            m3, m8
+    pmaddubsw            m1, m8
+    phaddw               m2, m0
+    phaddw               m3, m1
+    pmulhrsw             m2, m10
+    pmulhrsw             m3, m10
+    palignr              m4, m3, m2, 4
+    punpcklwd            m1, m2, m4  ; 01 12
+    punpckhwd            m2, m4      ; 23 34
+    pshufd               m0, m3, q2121
+    punpcklwd            m3, m0      ; 45 56
+.hv_w4_loop:
+    pmaddwd              m5, m1, m12 ; a0 b0
+    pmaddwd              m6, m2, m12 ; c0 d0
+    pmaddwd              m2, m13     ; a1 b1
+    pmaddwd              m4, m3, m13 ; c1 d1
+    mova                 m1, m3
+    pmaddwd              m3, m14     ; a2 b2
+    paddd                m5, m2
+    vpbroadcastq         m2, [srcq+stride3q ]
+    lea                srcq, [srcq+strideq*4]
+    paddd                m6, m4
+    paddd                m5, m3
+    vpbroadcastq         m4, [srcq+strideq*0]
+    vpbroadcastq         m3, [srcq+strideq*1]
+    vpblendd             m2, m2, m4, 0xcc
+    vpbroadcastq         m4, [srcq+strideq*2]
+    vpblendd             m3, m3, m4, 0xcc
+    pshufb               m2, m7
+    pshufb               m3, m7
+    pmaddubsw            m2, m8
+    pmaddubsw            m3, m8
+    phaddw               m2, m3
+    pmulhrsw             m2, m10
+    palignr              m3, m2, m0, 12
+    mova                 m0, m2
+    punpcklwd            m2, m3, m0  ; 67 78
+    punpckhwd            m3, m0      ; 89 9a
+    pmaddwd              m4, m2, m14 ; c2 d2
+    paddd                m6, m11
+    paddd                m5, m11
+    paddd                m6, m4
+    pmaddwd              m4, m2, m15 ; a3 b3
+    paddd                m5, m4
+    pmaddwd              m4, m3, m15 ; c3 d3
+    paddd                m6, m4
+    psrad                m5, 6
+    psrad                m6, 6
+    packssdw             m5, m6
+    vpermd               m5, m9, m5
+    mova             [tmpq], m5
+    add                tmpq, 32
+    sub                  hd, 4
+    jg .hv_w4_loop
+    RET
+.hv_w8:
+    shr                 mxd, 16
+    sub                srcq, 3
+    vpbroadcastd        m10, [r7+mxq*8+subpel_filters-prep_avx2+0]
+    vpbroadcastd        m11, [r7+mxq*8+subpel_filters-prep_avx2+4]
+    movzx               mxd, myb
+    shr                 myd, 16
+    cmp                  hd, 4
+    cmove               myd, mxd
+    vpbroadcastq         m0, [r7+myq*8+subpel_filters-prep_avx2]
+    lea            stride3q, [strideq*3]
+    sub                srcq, stride3q
+    punpcklbw            m0, m0
+    psraw                m0, 8 ; sign-extend
+    pshufd              m12, m0, q0000
+    pshufd              m13, m0, q1111
+    pshufd              m14, m0, q2222
+    pshufd              m15, m0, q3333
+    lea                 r6d, [wq-8]
+    mov                  r5, tmpq
+    mov                  r7, srcq
+    shl                 r6d, 5
+    mov                 r6b, hb
+.hv_w8_loop0:
+    vbroadcasti128       m7, [subpel_h_shufA]
+    vbroadcasti128       m8, [subpel_h_shufB]
+    vbroadcasti128       m9, [subpel_h_shufC]
+    movu                xm4,     [srcq+strideq*0]
+    movu                xm5,     [srcq+strideq*1]
+    lea                srcq,     [srcq+strideq*2]
+    movu                xm6,     [srcq+strideq*0]
+    vbroadcasti128       m0,     [srcq+strideq*1]
+    lea                srcq,     [srcq+strideq*2]
+    vpblendd             m4, m4, m0, 0xf0            ; 0 3
+    vinserti128          m5, m5, [srcq+strideq*0], 1 ; 1 4
+    vinserti128          m6, m6, [srcq+strideq*1], 1 ; 2 5
+    lea                srcq,     [srcq+strideq*2]
+    vinserti128          m0, m0, [srcq+strideq*0], 1 ; 3 6
+    HV_H_W8              m4, m1, m2, m3, m7, m8, m9
+    HV_H_W8              m5, m1, m2, m3, m7, m8, m9
+    HV_H_W8              m6, m1, m2, m3, m7, m8, m9
+    HV_H_W8              m0, m1, m2, m3, m7, m8, m9
+    vpbroadcastd         m7, [pw_8192]
+    vpermq               m4, m4, q3120
+    vpermq               m5, m5, q3120
+    vpermq               m6, m6, q3120
+    pmulhrsw             m0, m7
+    pmulhrsw             m4, m7
+    pmulhrsw             m5, m7
+    pmulhrsw             m6, m7
+    vpermq               m7, m0, q3120
+    punpcklwd            m1, m4, m5  ; 01
+    punpckhwd            m4, m5      ; 34
+    punpcklwd            m2, m5, m6  ; 12
+    punpckhwd            m5, m6      ; 45
+    punpcklwd            m3, m6, m7  ; 23
+    punpckhwd            m6, m7      ; 56
+.hv_w8_loop:
+    vextracti128     [tmpq], m0, 1 ; not enough registers
+    movu                xm0,     [srcq+strideq*1]
+    lea                srcq,     [srcq+strideq*2]
+    vinserti128          m0, m0, [srcq+strideq*0], 1 ; 7 8
+    pmaddwd              m8, m1, m12 ; a0
+    pmaddwd              m9, m2, m12 ; b0
+    mova                 m1, m3
+    mova                 m2, m4
+    pmaddwd              m3, m13     ; a1
+    pmaddwd              m4, m13     ; b1
+    paddd                m8, m3
+    paddd                m9, m4
+    mova                 m3, m5
+    mova                 m4, m6
+    pmaddwd              m5, m14     ; a2
+    pmaddwd              m6, m14     ; b2
+    paddd                m8, m5
+    paddd                m9, m6
+    vbroadcasti128       m6, [subpel_h_shufB]
+    vbroadcasti128       m7, [subpel_h_shufC]
+    vbroadcasti128       m5, [subpel_h_shufA]
+    HV_H_W8              m0, m5, m6, m7, m5, m6, m7
+    vpbroadcastd         m5, [pw_8192]
+    vpbroadcastd         m7, [pd_32]
+    vbroadcasti128       m6, [tmpq]
+    pmulhrsw             m0, m5
+    paddd                m8, m7
+    paddd                m9, m7
+    vpermq               m7, m0, q3120    ; 7 8
+    shufpd               m6, m6, m7, 0x04 ; 6 7
+    punpcklwd            m5, m6, m7  ; 67
+    punpckhwd            m6, m7      ; 78
+    pmaddwd              m7, m5, m15 ; a3
+    paddd                m8, m7
+    pmaddwd              m7, m6, m15 ; b3
+    paddd                m7, m9
+    psrad                m8, 6
+    psrad                m7, 6
+    packssdw             m8, m7
+    vpermq               m7, m8, q3120
+    mova         [tmpq+wq*0], xm7
+    vextracti128 [tmpq+wq*2], m7, 1
+    lea                tmpq, [tmpq+wq*4]
+    sub                  hd, 2
+    jg .hv_w8_loop
+    mov                  hb, r6b
+    add                  r5, 16
+    add                  r7, 8
+    mov                tmpq, r5
+    mov                srcq, r7
+    sub                 r6d, 1<<8
+    jg .hv_w8_loop0
+    RET
+
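+; BIDIR_FN expands to the per-width store loops shared by the compound
+; functions below: the %1 op macro must leave 32 packed output bytes in m0,
+; and %1_INC_PTR must advance the tmp (and, for mask, mask) pointers.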
+%macro BIDIR_FN 1 ; op
+    %1                    0
+    lea            stride3q, [strideq*3]
+    jmp                  wq
+.w4:
+    vextracti128        xm1, m0, 1
+    movd   [dstq          ], xm0
+    pextrd [dstq+strideq*1], xm0, 1
+    movd   [dstq+strideq*2], xm1
+    pextrd [dstq+stride3q ], xm1, 1
+    cmp                  hd, 4
+    je .ret
+    lea                dstq, [dstq+strideq*4]
+    pextrd [dstq          ], xm0, 2
+    pextrd [dstq+strideq*1], xm0, 3
+    pextrd [dstq+strideq*2], xm1, 2
+    pextrd [dstq+stride3q ], xm1, 3
+    cmp                  hd, 8
+    je .ret
+    %1                    2
+    lea                dstq, [dstq+strideq*4]
+    vextracti128        xm1, m0, 1
+    movd   [dstq          ], xm0
+    pextrd [dstq+strideq*1], xm0, 1
+    movd   [dstq+strideq*2], xm1
+    pextrd [dstq+stride3q], xm1, 1
+    lea                dstq, [dstq+strideq*4]
+    pextrd [dstq          ], xm0, 2
+    pextrd [dstq+strideq*1], xm0, 3
+    pextrd [dstq+strideq*2], xm1, 2
+    pextrd [dstq+stride3q ], xm1, 3
+.ret:
+    RET
+.w8_loop:
+    %1_INC_PTR            2
+    %1                    0
+    lea                dstq, [dstq+strideq*4]
+.w8:
+    vextracti128        xm1, m0, 1
+    movq   [dstq          ], xm0
+    movq   [dstq+strideq*1], xm1
+    movhps [dstq+strideq*2], xm0
+    movhps [dstq+stride3q ], xm1
+    sub                  hd, 4
+    jg .w8_loop
+    RET
+.w16_loop:
+    %1_INC_PTR            4
+    %1                    0
+    lea                dstq, [dstq+strideq*4]
+.w16:
+    vpermq               m0, m0, q3120
+    mova         [dstq          ], xm0
+    vextracti128 [dstq+strideq*1], m0, 1
+    %1                    2
+    vpermq               m0, m0, q3120
+    mova         [dstq+strideq*2], xm0
+    vextracti128 [dstq+stride3q ], m0, 1
+    sub                  hd, 4
+    jg .w16_loop
+    RET
+.w32_loop:
+    %1_INC_PTR            4
+    %1                    0
+    lea                dstq, [dstq+strideq*2]
+.w32:
+    vpermq               m0, m0, q3120
+    mova             [dstq], m0
+    %1                    2
+    vpermq               m0, m0, q3120
+    mova   [dstq+strideq*1], m0
+    sub                  hd, 2
+    jg .w32_loop
+    RET
+.w64_loop:
+    %1_INC_PTR            4
+    %1                    0
+    add                dstq, strideq
+.w64:
+    vpermq               m0, m0, q3120
+    mova             [dstq], m0
+    %1                    2
+    vpermq               m0, m0, q3120
+    mova          [dstq+32], m0
+    dec                  hd
+    jg .w64_loop
+    RET
+.w128_loop:
+    %1                    0
+    add                dstq, strideq
+.w128:
+    vpermq               m0, m0, q3120
+    mova        [dstq+0*32], m0
+    %1                    2
+    vpermq               m0, m0, q3120
+    mova        [dstq+1*32], m0
+    %1_INC_PTR            8
+    %1                   -4
+    vpermq               m0, m0, q3120
+    mova        [dstq+2*32], m0
+    %1                   -2
+    vpermq               m0, m0, q3120
+    mova        [dstq+3*32], m0
+    dec                  hd
+    jg .w128_loop
+    RET
+%endmacro
+
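+; pmulhrsw with pw_1024 is a rounded shift, (x*1024 + 0x4000) >> 15 = (x + 16) >> 5,
+; so each output byte of avg is clip_pixel((tmp1 + tmp2 + 16) >> 5).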
+%macro AVG 1 ; src_offset
+    mova                 m0, [tmp1q+(%1+0)*mmsize]
+    paddw                m0, [tmp2q+(%1+0)*mmsize]
+    mova                 m1, [tmp1q+(%1+1)*mmsize]
+    paddw                m1, [tmp2q+(%1+1)*mmsize]
+    pmulhrsw             m0, m2
+    pmulhrsw             m1, m2
+    packuswb             m0, m1
+%endmacro
+
+%macro AVG_INC_PTR 1
+    add               tmp1q, %1*mmsize
+    add               tmp2q, %1*mmsize
+%endmacro
+
+cglobal avg, 4, 7, 3, dst, stride, tmp1, tmp2, w, h, stride3
+    lea                  r6, [avg_avx2_table]
+    tzcnt                wd, wm
+    movifnidn            hd, hm
+    movsxd               wq, dword [r6+wq*4]
+    vpbroadcastd         m2, [pw_1024+r6-avg_avx2_table]
+    add                  wq, r6
+    BIDIR_FN            AVG
+
+%macro W_AVG 1 ; src_offset
+    ; (a * weight + b * (16 - weight) + 128) >> 8
+    ; = ((a - b) * weight + (b << 4) + 128) >> 8
+    ; = ((((b - a) * (-weight << 12)) >> 16) + b + 8) >> 4
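+    ; e.g. a = 160, b = 80, weight = 12:
+    ;   (160*12 + 80*4 + 128) >> 8                       = 2368 >> 8    = 9
+    ;   ((((80-160) * (-12 << 12)) >> 16) + 80 + 8) >> 4 = (60+88) >> 4 = 9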
+    mova                 m0,     [tmp2q+(%1+0)*mmsize]
+    psubw                m2, m0, [tmp1q+(%1+0)*mmsize]
+    mova                 m1,     [tmp2q+(%1+1)*mmsize]
+    psubw                m3, m1, [tmp1q+(%1+1)*mmsize]
+    paddw                m2, m2 ; compensate for the weight only being half
+    paddw                m3, m3 ; of what it should be
+    pmulhw               m2, m4
+    pmulhw               m3, m4
+    paddw                m0, m2
+    paddw                m1, m3
+    pmulhrsw             m0, m5
+    pmulhrsw             m1, m5
+    packuswb             m0, m1
+%endmacro
+
+%define W_AVG_INC_PTR AVG_INC_PTR
+
+cglobal w_avg, 4, 7, 6, dst, stride, tmp1, tmp2, w, h, stride3
+    lea                  r6, [w_avg_avx2_table]
+    tzcnt                wd, wm
+    movifnidn            hd, hm
+    vpbroadcastw         m0, r6m ; weight
+    movsxd               wq, dword [r6+wq*4]
+    pxor                 m4, m4
+    psllw                m0, 11 ; can't shift by 12, sign bit must be preserved
+    psubw                m4, m0
+    vpbroadcastd         m5, [pw_2048+r6-w_avg_avx2_table]
+    add                  wq, r6
+    BIDIR_FN          W_AVG
+
+%macro MASK 1 ; src_offset
+    ; (a * m + b * (64 - m) + 512) >> 10
+    ; = ((a - b) * m + (b << 6) + 512) >> 10
+    ; = ((((b - a) * (-m << 10)) >> 16) + b + 8) >> 4
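+    ; the final pmulhrsw with pw_2048 performs the trailing "+ 8) >> 4":
+    ; (x*2048 + 0x4000) >> 15 = (x + 8) >> 4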
+    vpermq               m3,     [maskq+(%1+0)*(mmsize/2)], q3120
+    mova                 m0,     [tmp2q+(%1+0)*mmsize]
+    psubw                m1, m0, [tmp1q+(%1+0)*mmsize]
+    psubb                m3, m4, m3
+    paddw                m1, m1     ; (b - a) << 1
+    paddb                m3, m3
+    punpcklbw            m2, m4, m3 ; -m << 9
+    pmulhw               m1, m2
+    paddw                m0, m1
+    mova                 m1,     [tmp2q+(%1+1)*mmsize]
+    psubw                m2, m1, [tmp1q+(%1+1)*mmsize]
+    paddw                m2, m2
+    punpckhbw            m3, m4, m3
+    pmulhw               m2, m3
+    paddw                m1, m2
+    pmulhrsw             m0, m5
+    pmulhrsw             m1, m5
+    packuswb             m0, m1
+%endmacro
+
+%macro MASK_INC_PTR 1
+    add               maskq, %1*mmsize/2
+    add               tmp1q, %1*mmsize
+    add               tmp2q, %1*mmsize
+%endmacro
+
+cglobal mask, 4, 8, 6, dst, stride, tmp1, tmp2, w, h, mask, stride3
+    lea                  r7, [mask_avx2_table]
+    tzcnt                wd, wm
+    movifnidn            hd, hm
+    mov               maskq, maskmp
+    movsxd               wq, dword [r7+wq*4]
+    pxor                 m4, m4
+    vpbroadcastd         m5, [pw_2048+r7-mask_avx2_table]
+    add                  wq, r7
+    BIDIR_FN           MASK
+
+%macro W_MASK_420 2 ; src_offset, mask_out
+    mova                 m0, [tmp1q+(%1+0)*mmsize]
+    mova                 m1, [tmp2q+(%1+0)*mmsize]
+    psubw                m1, m0
+    pabsw               m%2, m1
+    paddw               m%2, m6
+    psrlw               m%2, 8       ; (abs(tmp1 - tmp2) + 8) >> 8
+    psubusw             m%2, m7, m%2 ; 64 - min(m, 64)
+    psllw                m2, m%2, 10
+    pmulhw               m1, m2
+    paddw                m0, m1
+    mova                 m1, [tmp1q+(%1+1)*mmsize]
+    mova                 m2, [tmp2q+(%1+1)*mmsize]
+    psubw                m2, m1
+    pabsw                m3, m2
+    paddw                m3, m6
+    psrlw                m3, 8
+    psubusw              m3, m7, m3
+    phaddw              m%2, m3
+    psllw                m3, 10
+    pmulhw               m2, m3
+    paddw                m1, m2
+    pmulhrsw             m0, m8
+    pmulhrsw             m1, m8
+    packuswb             m0, m1
+%endmacro
+
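+; w_mask_420 blends the two intermediates like mask, but derives the mask from
+; |tmp1 - tmp2| itself and also writes a 4:2:0 subsampled mask: each word of
+; the mask register holds (64 - m) summed over a horizontal pixel pair
+; (phaddw), and the width cases subtract two such pair sums from
+; m9 = 64*4 + 2 - sign and shift right by 2, giving
+; (m00 + m01 + m10 + m11 + 2 - sign) >> 2 per 2x2 block.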
+cglobal w_mask_420, 4, 8, 15, dst, stride, tmp1, tmp2, w, h, mask, stride3
+    lea                  r7, [w_mask_420_avx2_table]
+    tzcnt                wd, wm
+    movifnidn            hd, hm
+    mov               maskq, maskmp
+    vpbroadcastw         m0, r7m ; sign
+    movsxd               wq, dword [r7+wq*4]
+    vpbroadcastd         m6, [pw_8       +r7-w_mask_420_avx2_table]
+    vpbroadcastd         m7, [pw_26      +r7-w_mask_420_avx2_table] ; 64 - 38
+    vpbroadcastd         m8, [pw_2048    +r7-w_mask_420_avx2_table]
+    vpbroadcastd         m9, [pw_258     +r7-w_mask_420_avx2_table] ; 64 * 4 + 2
+    pmovzxbd            m10, [deint_shuf4+r7-w_mask_420_avx2_table]
+    psubw                m9, m0
+    add                  wq, r7
+    W_MASK_420            0, 4
+    lea            stride3q, [strideq*3]
+    jmp                  wq
+.w4:
+    vextracti128        xm1, m0, 1
+    movd   [dstq          ], xm0
+    pextrd [dstq+strideq*1], xm0, 1
+    movd   [dstq+strideq*2], xm1
+    pextrd [dstq+stride3q ], xm1, 1
+    cmp                  hd, 4
+    je .w4_end
+    lea                dstq, [dstq+strideq*4]
+    pextrd [dstq          ], xm0, 2
+    pextrd [dstq+strideq*1], xm0, 3
+    pextrd [dstq+strideq*2], xm1, 2
+    pextrd [dstq+stride3q ], xm1, 3
+    cmp                  hd, 8
+    jg .w4_h16
+.w4_end:
+    vextracti128        xm0, m4, 1
+    vpblendd            xm1, xm4, xm0, 0x05
+    vpblendd            xm4, xm4, xm0, 0x0a
+    pshufd              xm1, xm1, q2301
+    psubw               xm4, xm9, xm4
+    psubw               xm4, xm1
+    psrlw               xm4, 2
+    packuswb            xm4, xm4
+    movq            [maskq], xm4
+    RET
+.w4_h16:
+    W_MASK_420            2, 5
+    lea                dstq, [dstq+strideq*4]
+    phaddd               m4, m5
+    vextracti128        xm1, m0, 1
+    psubw                m4, m9, m4
+    psrlw                m4, 2
+    vpermd               m4, m10, m4
+    vextracti128        xm5, m4, 1
+    packuswb            xm4, xm5
+    movd   [dstq          ], xm0
+    pextrd [dstq+strideq*1], xm0, 1
+    movd   [dstq+strideq*2], xm1
+    pextrd [dstq+stride3q], xm1, 1
+    lea                dstq, [dstq+strideq*4]
+    pextrd [dstq          ], xm0, 2
+    pextrd [dstq+strideq*1], xm0, 3
+    pextrd [dstq+strideq*2], xm1, 2
+    pextrd [dstq+stride3q ], xm1, 3
+    mova            [maskq], xm4
+    RET
+.w8_loop:
+    add               tmp1q, 2*32
+    add               tmp2q, 2*32
+    W_MASK_420            0, 4
+    lea                dstq, [dstq+strideq*4]
+    add               maskq, 8
+.w8:
+    vextracti128        xm2, m4, 1
+    vextracti128        xm1, m0, 1
+    psubw               xm4, xm9, xm4
+    psubw               xm4, xm2
+    psrlw               xm4, 2
+    packuswb            xm4, xm4
+    movq   [dstq          ], xm0
+    movq   [dstq+strideq*1], xm1
+    movhps [dstq+strideq*2], xm0
+    movhps [dstq+stride3q ], xm1
+    movq            [maskq], xm4
+    sub                  hd, 4
+    jg .w8_loop
+    RET
+.w16_loop:
+    add               tmp1q, 4*32
+    add               tmp2q, 4*32
+    W_MASK_420            0, 4
+    lea                dstq, [dstq+strideq*4]
+    add               maskq, 16
+.w16:
+    vpermq               m0, m0, q3120
+    mova         [dstq          ], xm0
+    vextracti128 [dstq+strideq*1], m0, 1
+    W_MASK_420            2, 5
+    punpckhqdq           m1, m4, m5
+    punpcklqdq           m4, m5
+    psubw                m1, m9, m1
+    psubw                m1, m4
+    psrlw                m1, 2
+    vpermq               m0, m0, q3120
+    packuswb             m1, m1
+    vpermd               m1, m10, m1
+    mova         [dstq+strideq*2], xm0
+    vextracti128 [dstq+stride3q ], m0, 1
+    mova            [maskq], xm1
+    sub                  hd, 4
+    jg .w16_loop
+    RET
+.w32_loop:
+    add               tmp1q, 4*32
+    add               tmp2q, 4*32
+    W_MASK_420            0, 4
+    lea                dstq, [dstq+strideq*2]
+    add               maskq, 16
+.w32:
+    vpermq               m0, m0, q3120
+    mova             [dstq], m0
+    W_MASK_420            2, 5
+    psubw                m4, m9, m4
+    psubw                m4, m5
+    psrlw                m4, 2
+    vpermq               m0, m0, q3120
+    packuswb             m4, m4
+    vpermd               m4, m10, m4
+    mova   [dstq+strideq*1], m0
+    mova            [maskq], xm4
+    sub                  hd, 2
+    jg .w32_loop
+    RET
+.w64_loop_even:
+    psubw               m11, m9, m4
+    psubw               m12, m9, m5
+    dec                  hd
+.w64_loop:
+    add               tmp1q, 4*32
+    add               tmp2q, 4*32
+    W_MASK_420            0, 4
+    add                dstq, strideq
+.w64:
+    vpermq               m0, m0, q3120
+    mova             [dstq], m0
+    W_MASK_420            2, 5
+    vpermq               m0, m0, q3120
+    mova          [dstq+32], m0
+    test                 hd, 1
+    jz .w64_loop_even
+    psubw                m4, m11, m4
+    psubw                m5, m12, m5
+    psrlw                m4, 2
+    psrlw                m5, 2
+    packuswb             m4, m5
+    vpermd               m4, m10, m4
+    mova            [maskq], m4
+    add               maskq, 32
+    dec                  hd
+    jg .w64_loop
+    RET
+.w128_loop_even:
+    psubw               m13, m9, m4
+    psubw               m14, m9, m5
+    dec                  hd
+.w128_loop:
+    W_MASK_420            0, 4
+    add                dstq, strideq
+.w128:
+    vpermq               m0, m0, q3120
+    mova        [dstq+0*32], m0
+    W_MASK_420            2, 5
+    vpermq               m0, m0, q3120
+    mova        [dstq+1*32], m0
+    add               tmp1q, 8*32
+    add               tmp2q, 8*32
+    test                 hd, 1
+    jz .w128_even
+    psubw                m4, m11, m4
+    psubw                m5, m12, m5
+    psrlw                m4, 2
+    psrlw                m5, 2
+    packuswb             m4, m5
+    vpermd               m4, m10, m4
+    mova            [maskq], m4
+    jmp .w128_odd
+.w128_even:
+    psubw               m11, m9, m4
+    psubw               m12, m9, m5
+.w128_odd:
+    W_MASK_420           -4, 4
+    vpermq               m0, m0, q3120
+    mova        [dstq+2*32], m0
+    W_MASK_420           -2, 5
+    vpermq               m0, m0, q3120
+    mova        [dstq+3*32], m0
+    test                 hd, 1
+    jz .w128_loop_even
+    psubw                m4, m13, m4
+    psubw                m5, m14, m5
+    psrlw                m4, 2
+    psrlw                m5, 2
+    packuswb             m4, m5
+    vpermd               m4, m10, m4
+    mova         [maskq+32], m4
+    add               maskq, 64
+    dec                  hd
+    jg .w128_loop
+    RET
+
+%endif ; ARCH_X86_64