shithub: openh264

Download patch

ref: 60c7e832f335efc9de5b598ab9b7f35b98035adf
parent: 482387bd5158dd967b986b3fac8705858218a7ab
author: gxw <guxiwei-hf@loongson.cn>
date: Mon May 18 11:30:34 EDT 2020

codec/common: [MIPS] Add msa deblock and copy_mb optimization.

Add following msa optimization files:
1. codec/common/mips/copy_mb_msa.c
2. codec/common/mips/deblock_msa.c

--- a/codec/common/inc/copy_mb.h
+++ b/codec/common/inc/copy_mb.h
@@ -82,6 +82,13 @@
 void WelsCopy16x16_mmi (uint8_t* Dst, int32_t  iStrideD, uint8_t* Src, int32_t  iStrideS);
 void WelsCopy16x16NotAligned_mmi (uint8_t* Dst, int32_t  iStrideD, uint8_t* Src, int32_t  iStrideS);
 #endif//HAVE_MMI
+
+#if defined (HAVE_MSA)
+void WelsCopy8x8_msa (uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, int32_t iStrideS);
+void WelsCopy8x16_msa (uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, int32_t iStrideS);
+void WelsCopy16x8_msa (uint8_t* Dst, int32_t  iStrideD, uint8_t* Src, int32_t  iStrideS);
+void WelsCopy16x16_msa (uint8_t* Dst, int32_t  iStrideD, uint8_t* Src, int32_t  iStrideS);
+#endif//HAVE_MSA
 #if defined(__cplusplus)
 }
 #endif//__cplusplus
--- a/codec/common/inc/deblocking_common.h
+++ b/codec/common/inc/deblocking_common.h
@@ -91,6 +91,20 @@
                             int8_t* pTC);
 void WelsNonZeroCount_mmi (int8_t* pNonZeroCount);
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+void DeblockLumaLt4V_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta, int8_t* pTc);
+void DeblockLumaEq4V_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockLumaLt4H_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta, int8_t* pTc);
+void DeblockLumaEq4H_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockChromaEq4V_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockChromaLt4V_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta,
+                            int8_t* pTC);
+void DeblockChromaEq4H_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockChromaLt4H_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta,
+                            int8_t* pTC);
+void WelsNonZeroCount_msa (int8_t* pNonZeroCount);
+#endif//HAVE_MSA
 #if defined(__cplusplus)
 }
 #endif//__cplusplus
--- /dev/null
+++ b/codec/common/inc/msa_macros.h
@@ -1,0 +1,2393 @@
+/*
+ * Copyright © 2020 Loongson Technology Co. Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author:  Yin Shiyou (yinshiyou-hf@loongson.cn)
+ *          Gu  Xiwei  (guxiwei-hf@loongson.cn)
+ */
+
+/*
+ * This header file is copied from loongson LSOM project.
+ * MSA macros are implemented with msa intrinsics in msa.h,
+ * and used for simplifying MSA optimization.
+ */
+
+#ifndef _MSA_MACROS_H
+#define _MSA_MACROS_H 1
+#define MSA_MACROS_VERSION 18
+#include <msa.h>
+
+#if (__mips_isa_rev >= 6)
+    #define LH(psrc)                              \
+    ( {                                           \
+        uint16_t val_lh_m = *(uint16_t *)(psrc);  \
+        val_lh_m;                                 \
+    } )
+
+    #define LW(psrc)                              \
+    ( {                                           \
+        uint32_t val_lw_m = *(uint32_t *)(psrc);  \
+        val_lw_m;                                 \
+    } )
+
+    #if (__mips == 64)
+        #define LD(psrc)                               \
+        ( {                                            \
+            uint64_t val_ld_m =  *(uint64_t *)(psrc);  \
+            val_ld_m;                                  \
+        } )
+    #else  // !(__mips == 64)
+        #define LD(psrc)                                                    \
+        ( {                                                                 \
+            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
+            uint32_t val0_ld_m, val1_ld_m;                                  \
+            uint64_t val_ld_m = 0;                                          \
+                                                                            \
+            val0_ld_m = LW(psrc_ld_m);                                      \
+            val1_ld_m = LW(psrc_ld_m + 4);                                  \
+                                                                            \
+            val_ld_m = (uint64_t) (val1_ld_m);                              \
+            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
+            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
+                                                                            \
+            val_ld_m;                                                       \
+        } )
+    #endif  // (__mips == 64)
+
+    #define SH(val, pdst)  *(uint16_t *)(pdst) = (val);
+    #define SW(val, pdst)  *(uint32_t *)(pdst) = (val);
+    #define SD(val, pdst)  *(uint64_t *)(pdst) = (val);
+
+#else  // !(__mips_isa_rev >= 6)
+    #define LH(psrc)                                 \
+    ( {                                              \
+        uint8_t *psrc_lh_m = (uint8_t *) (psrc);     \
+        uint16_t val_lh_m;                           \
+                                                     \
+        __asm__ volatile (                           \
+            "ulh  %[val_lh_m],  %[psrc_lh_m]  \n\t"  \
+                                                     \
+            : [val_lh_m] "=r" (val_lh_m)             \
+            : [psrc_lh_m] "m" (*psrc_lh_m)           \
+        );                                           \
+                                                     \
+        val_lh_m;                                    \
+    } )
+
+    #define LW(psrc)                                 \
+    ( {                                              \
+        uint8_t *psrc_lw_m = (uint8_t *) (psrc);     \
+        uint32_t val_lw_m;                           \
+                                                     \
+        __asm__ volatile (                           \
+            "ulw  %[val_lw_m],  %[psrc_lw_m]  \n\t"  \
+                                                     \
+            : [val_lw_m] "=r" (val_lw_m)             \
+            : [psrc_lw_m] "m" (*psrc_lw_m)           \
+        );                                           \
+                                                     \
+        val_lw_m;                                    \
+    } )
+
+    #if (__mips == 64)
+        #define LD(psrc)                                 \
+        ( {                                              \
+            uint8_t *psrc_ld_m = (uint8_t *) (psrc);     \
+            uint64_t val_ld_m = 0;                       \
+                                                         \
+            __asm__ volatile (                           \
+                "uld  %[val_ld_m],  %[psrc_ld_m]  \n\t"  \
+                                                         \
+                : [val_ld_m] "=r" (val_ld_m)             \
+                : [psrc_ld_m] "m" (*psrc_ld_m)           \
+            );                                           \
+                                                         \
+            val_ld_m;                                    \
+        } )
+    #else  // !(__mips == 64)
+        #define LD(psrc)                                                    \
+        ( {                                                                 \
+            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
+            uint32_t val0_ld_m, val1_ld_m;                                  \
+            uint64_t val_ld_m = 0;                                          \
+                                                                            \
+            val0_ld_m = LW(psrc_ld_m);                                      \
+            val1_ld_m = LW(psrc_ld_m + 4);                                  \
+                                                                            \
+            val_ld_m = (uint64_t) (val1_ld_m);                              \
+            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
+            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
+                                                                            \
+            val_ld_m;                                                       \
+        } )
+    #endif  // (__mips == 64)
+
+    #define SH(val, pdst)                            \
+    {                                                \
+        uint8_t *pdst_sh_m = (uint8_t *) (pdst);     \
+        uint16_t val_sh_m = (val);                   \
+                                                     \
+        __asm__ volatile (                           \
+            "ush  %[val_sh_m],  %[pdst_sh_m]  \n\t"  \
+                                                     \
+            : [pdst_sh_m] "=m" (*pdst_sh_m)          \
+            : [val_sh_m] "r" (val_sh_m)              \
+        );                                           \
+    }
+
+    #define SW(val, pdst)                            \
+    {                                                \
+        uint8_t *pdst_sw_m = (uint8_t *) (pdst);     \
+        uint32_t val_sw_m = (val);                   \
+                                                     \
+        __asm__ volatile (                           \
+            "usw  %[val_sw_m],  %[pdst_sw_m]  \n\t"  \
+                                                     \
+            : [pdst_sw_m] "=m" (*pdst_sw_m)          \
+            : [val_sw_m] "r" (val_sw_m)              \
+        );                                           \
+    }
+
+    #define SD(val, pdst)                                             \
+    {                                                                 \
+        uint8_t *pdst_sd_m = (uint8_t *) (pdst);                      \
+        uint32_t val0_sd_m, val1_sd_m;                                \
+                                                                      \
+        val0_sd_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);          \
+        val1_sd_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF);  \
+                                                                      \
+        SW(val0_sd_m, pdst_sd_m);                                     \
+        SW(val1_sd_m, pdst_sd_m + 4);                                 \
+    }
+#endif // (__mips_isa_rev >= 6)
+
+
+
+
+
+
+/* Description : Load vector elements with stride.
+ * Arguments   : Inputs  - psrc    (source pointer to load from)
+ *                       - stride
+ *               Outputs - out0, out1...
+ *               Return Type - as per RTYPE
+ * Details     : Loads elements in 'out0' from (psrc).
+ *               Loads elements in 'out1' from (psrc + stride).
+ */
+#define MSA_LD_V(RTYPE, psrc, out) (out) = *((RTYPE *)(psrc));
+
+#define MSA_LD_V2(RTYPE, psrc, stride, out0, out1)  \
+{                                                   \
+    MSA_LD_V(RTYPE, (psrc), out0);                  \
+    MSA_LD_V(RTYPE, (psrc) + (stride), out1);       \
+}
+
+#define MSA_LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3)     \
+{                                                                  \
+    MSA_LD_V2(RTYPE, (psrc), stride, out0, out1);                  \
+    MSA_LD_V2(RTYPE, (psrc) + 2 * (stride) , stride, out2, out3);  \
+}
+
+#define MSA_LD_V8(RTYPE, psrc, stride, out0, out1, out2, out3,                \
+                  out4, out5, out6, out7)                                     \
+{                                                                             \
+    MSA_LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);                 \
+    MSA_LD_V4(RTYPE, (psrc) + 4 * (stride), stride, out4, out5, out6, out7);  \
+}
+
+/* Description : Store vectors with stride.
+ * Arguments   : Inputs  - in0, in1...  (source vector to be stored)
+ *                       - stride
+ *               Outputs - pdst    (destination pointer to store to)
+ * Details     : Stores elements from 'in0' to (pdst).
+ *               Stores elements from 'in1' to (pdst + stride).
+ */
+#define MSA_ST_V(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in);
+
+#define MSA_ST_V2(RTYPE, in0, in1, pdst, stride)  \
+{                                                 \
+    MSA_ST_V(RTYPE, in0, (pdst));                 \
+    MSA_ST_V(RTYPE, in1, (pdst) + (stride));      \
+}
+
+#define MSA_ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride)      \
+{                                                               \
+    MSA_ST_V2(RTYPE, in0, in1, (pdst), stride);                 \
+    MSA_ST_V2(RTYPE, in2, in3, (pdst) + 2 * (stride), stride);  \
+}
+
+#define MSA_ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
+{                                                                              \
+    MSA_ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride);                      \
+    MSA_ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * (stride), stride);       \
+}
+
+/* Description : Store half word elements of vector with stride.
+ * Arguments   : Inputs  - in      (source vector)
+ *                       - pdst    (destination pointer to store to)
+ *                       - stride
+ * Details     : Stores half word 'idx0' from 'in' to (pdst).
+ *               Stores half word 'idx1' from 'in' to (pdst + stride).
+ *               Similar for other elements.
+ */
+#define MSA_ST_H(in, idx, pdst)                          \
+{                                                        \
+    uint16_t out0_m;                                     \
+    out0_m = __msa_copy_u_h((v8i16) in, idx);            \
+    SH(out0_m, (pdst));                                  \
+}
+#define MSA_ST_H2(in, idx0, idx1, pdst, stride)          \
+{                                                        \
+    uint16_t out0_m, out1_m;                             \
+    out0_m = __msa_copy_u_h((v8i16) in, idx0);           \
+    out1_m = __msa_copy_u_h((v8i16) in, idx1);           \
+    SH(out0_m, (pdst));                                  \
+    SH(out1_m, (pdst) + stride);                         \
+}
+#define MSA_ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride)          \
+{                                                                    \
+    uint16_t out0_m, out1_m, out2_m, out3_m;                         \
+    out0_m = __msa_copy_u_h((v8i16) in, idx0);                       \
+    out1_m = __msa_copy_u_h((v8i16) in, idx1);                       \
+    out2_m = __msa_copy_u_h((v8i16) in, idx2);                       \
+    out3_m = __msa_copy_u_h((v8i16) in, idx3);                       \
+    SH(out0_m, (pdst));                                              \
+    SH(out1_m, (pdst) + stride);                                     \
+    SH(out2_m, (pdst) + 2 * stride);                                 \
+    SH(out3_m, (pdst) + 3 * stride);                                 \
+}
+#define MSA_ST_H8(in, idx0, idx1, idx2, idx3, idx4, idx5,            \
+              idx6, idx7, pdst, stride)                              \
+{                                                                    \
+    MSA_ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride)              \
+    MSA_ST_H4(in, idx4, idx5, idx6, idx7, (pdst) + 4*stride, stride) \
+}
+
+/* Description : Store word elements of vector with stride.
+ * Arguments   : Inputs  - in      (source vector)
+ *                       - pdst    (destination pointer to store to)
+ *                       - stride
+ * Details     : Stores word 'idx0' from 'in' to (pdst).
+ *               Stores word 'idx1' from 'in' to (pdst + stride).
+ *               Similar for other elements.
+ */
+#define MSA_ST_W(in, idx, pdst)                          \
+{                                                        \
+    uint32_t out0_m;                                     \
+    out0_m = __msa_copy_u_w((v4i32) in, idx);            \
+    SW(out0_m, (pdst));                                  \
+}
+#define MSA_ST_W2(in, idx0, idx1, pdst, stride)          \
+{                                                        \
+    uint32_t out0_m, out1_m;                             \
+    out0_m = __msa_copy_u_w((v4i32) in, idx0);           \
+    out1_m = __msa_copy_u_w((v4i32) in, idx1);           \
+    SW(out0_m, (pdst));                                  \
+    SW(out1_m, (pdst) + stride);                         \
+}
+#define MSA_ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride)         \
+{                                                                   \
+    uint32_t out0_m, out1_m, out2_m, out3_m;                        \
+    out0_m = __msa_copy_u_w((v4i32) in, idx0);                      \
+    out1_m = __msa_copy_u_w((v4i32) in, idx1);                      \
+    out2_m = __msa_copy_u_w((v4i32) in, idx2);                      \
+    out3_m = __msa_copy_u_w((v4i32) in, idx3);                      \
+    SW(out0_m, (pdst));                                             \
+    SW(out1_m, (pdst) + stride);                                    \
+    SW(out2_m, (pdst) + 2*stride);                                  \
+    SW(out3_m, (pdst) + 3*stride);                                  \
+}
+#define MSA_ST_W8(in0, in1, idx0, idx1, idx2, idx3,                 \
+              idx4, idx5, idx6, idx7, pdst, stride)                 \
+{                                                                   \
+    MSA_ST_W4(in0, idx0, idx1, idx2, idx3, pdst, stride)            \
+    MSA_ST_W4(in1, idx4, idx5, idx6, idx7, pdst + 4*stride, stride) \
+}
+
+/* Description : Store double word elements of vector with stride.
+ * Arguments   : Inputs  - in      (source vector)
+ *                       - pdst    (destination pointer to store to)
+ *                       - stride
+ * Details     : Stores double word 'idx0' from 'in' to (pdst).
+ *               Stores double word 'idx1' from 'in' to (pdst + stride).
+ *               Similar for other elements.
+ */
+#define MSA_ST_D(in, idx, pdst)                    \
+{                                                  \
+    uint64_t out0_m;                               \
+    out0_m = __msa_copy_u_d((v2i64) in, idx);      \
+    SD(out0_m, (pdst));                            \
+}
+#define MSA_ST_D2(in, idx0, idx1, pdst, stride)    \
+{                                                  \
+    uint64_t out0_m, out1_m;                       \
+    out0_m = __msa_copy_u_d((v2i64) in, idx0);     \
+    out1_m = __msa_copy_u_d((v2i64) in, idx1);     \
+    SD(out0_m, (pdst));                            \
+    SD(out1_m, (pdst) + stride);                   \
+}
+#define MSA_ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)          \
+{                                                                          \
+    uint64_t out0_m, out1_m, out2_m, out3_m;                               \
+    out0_m = __msa_copy_u_d((v2i64) in0, idx0);                            \
+    out1_m = __msa_copy_u_d((v2i64) in0, idx1);                            \
+    out2_m = __msa_copy_u_d((v2i64) in1, idx2);                            \
+    out3_m = __msa_copy_u_d((v2i64) in1, idx3);                            \
+    SD(out0_m, (pdst));                                                    \
+    SD(out1_m, (pdst) + stride);                                           \
+    SD(out2_m, (pdst) + 2 * stride);                                       \
+    SD(out3_m, (pdst) + 3 * stride);                                       \
+}
+#define MSA_ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3,              \
+              idx4, idx5, idx6, idx7, pdst, stride)                        \
+{                                                                          \
+    MSA_ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)              \
+    MSA_ST_D4(in2, in3, idx4, idx5, idx6, idx7, pdst + 4 * stride, stride) \
+}
+
+/* Description : Shuffle byte vector elements as per mask vector.
+ * Arguments   : Inputs  - in0, in1  (source vectors)
+ *                       - mask      (mask vectors)
+ *               Outputs - out       (destination vectors)
+ *               Return Type - as per RTYPE
+ * Details     : Selective byte elements from 'in0' & 'in1' are copied to 'out' as
+ *               per control vector 'mask'.
+ */
+#define MSA_VSHF_B(RTYPE, in0, in1, mask, out)                             \
+{                                                                          \
+    out = (RTYPE) __msa_vshf_b((v16i8) mask, (v16i8) in0, (v16i8) in1);    \
+}
+
+#define MSA_VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)   \
+{                                                                          \
+    MSA_VSHF_B(RTYPE, in0, in1, mask0, out0)                               \
+    MSA_VSHF_B(RTYPE, in2, in3, mask1, out1)                               \
+}
+
+#define MSA_VSHF_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,         \
+                    mask0, mask1, mask2, mask3, out0, out1, out2, out3)    \
+{                                                                          \
+    MSA_VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);      \
+    MSA_VSHF_B2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3);      \
+}
+
+/* Description : Shuffle halfword vector elements as per mask vector.
+ * Arguments   : Inputs  - in0, in1  (source vectors)
+ *                       - mask      (mask vectors)
+ *               Outputs - out       (destination vectors)
+ *               Return Type - as per RTYPE
+ * Details     : Selective halfword elements from 'in0' & 'in1' are copied to 'out' as
+ *               per control vector 'mask'.
+ */
+#define MSA_VSHF_H(RTYPE, in0, in1, mask, out)                             \
+{                                                                          \
+    out = (RTYPE) __msa_vshf_h((v8i16) mask, (v8i16) in0, (v8i16) in1);    \
+}
+
+#define MSA_VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)   \
+{                                                                          \
+    MSA_VSHF_H(RTYPE, in0, in1, mask0, out0)                               \
+    MSA_VSHF_H(RTYPE, in2, in3, mask1, out1)                               \
+}
+
+#define MSA_VSHF_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,         \
+                    mask0, mask1, mask2, mask3, out0, out1, out2, out3)    \
+{                                                                          \
+    MSA_VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);      \
+    MSA_VSHF_H2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3);      \
+}
+
+/* Description : Shuffle word vector elements as per mask vector.
+ * Arguments   : Inputs  - in0, in1  (source vectors)
+ *                       - mask      (mask vectors)
+ *               Outputs - out       (dstination vectors)
+ *               Return Type - as per RTYPE
+ * Details     : Selective word elements from 'in0' & 'in1' are copied to 'out' as
+ *               per control vector 'mask'.
+ */
+#define MSA_VSHF_W(RTYPE, in0, in1, mask, out)                             \
+{                                                                          \
+    out = (RTYPE) __msa_vshf_w((v4i32) mask, (v4i32) in0, (v4i32) in1);    \
+}
+
+#define MSA_VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)   \
+{                                                                          \
+    MSA_VSHF_W(RTYPE, in0, in1, mask0, out0)                               \
+    MSA_VSHF_W(RTYPE, in2, in3, mask1, out1)                               \
+}
+
+#define MSA_VSHF_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,         \
+                    mask0, mask1, mask2, mask3, out0, out1, out2, out3)    \
+{                                                                          \
+    MSA_VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);      \
+    MSA_VSHF_W2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3);      \
+}
+
+/* Description : Interleave even byte elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even byte elements of 'in0' and even byte
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVEV_B(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvev_b((v16i8) in0, (v16i8) in1);  \
+}
+
+#define MSA_ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVEV_B(RTYPE, in0, in1, out0);                      \
+    MSA_ILVEV_B(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVEV_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave even half word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even half word elements of 'in0' and even half word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVEV_H(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvev_h((v8i16) in0, (v8i16) in1);  \
+}
+
+#define MSA_ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVEV_H(RTYPE, in0, in1, out0);                      \
+    MSA_ILVEV_H(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVEV_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave even word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even word elements of 'in0' and even word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVEV_W(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvev_w((v4i32) in0, (v4i32) in1);  \
+}
+
+#define MSA_ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVEV_W(RTYPE, in0, in1, out0);                      \
+    MSA_ILVEV_W(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVEV_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVEV_W2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave even double word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even double word elements of 'in0' and even double word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVEV_D(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvev_d((v2i64) in0, (v2i64) in1);  \
+}
+
+#define MSA_ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVEV_D(RTYPE, in0, in1, out0);                      \
+    MSA_ILVEV_D(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVEV_D2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave odd byte elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd byte elements of 'in0' and odd byte
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVOD_B(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvod_b((v16i8) in0, (v16i8) in1);  \
+}
+
+#define MSA_ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVOD_B(RTYPE, in0, in1, out0);                      \
+    MSA_ILVOD_B(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVOD_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVOD_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave odd halfword elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd halfword elements of 'in0' and odd halfword
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVOD_H(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvod_h((v8i16) in0, (v8i16) in1);  \
+}
+
+#define MSA_ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVOD_H(RTYPE, in0, in1, out0);                      \
+    MSA_ILVOD_H(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVOD_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVOD_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave odd word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd word elements of 'in0' and odd word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVOD_W(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvod_w((v4i32) in0, (v4i32) in1);  \
+}
+
+#define MSA_ILVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVOD_W(RTYPE, in0, in1, out0);                      \
+    MSA_ILVOD_W(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVOD_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVOD_W2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave odd double word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd double word elements of 'in0' and odd double word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVOD_D(RTYPE, in0, in1, out)                   \
+{                                                           \
+    out = (RTYPE) __msa_ilvod_d((v2i64) in0, (v2i64) in1);  \
+}
+
+#define MSA_ILVOD_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_ILVOD_D(RTYPE, in0, in1, out0);                      \
+    MSA_ILVOD_D(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVOD_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                     out0, out1, out2, out3)                         \
+{                                                                    \
+    MSA_ILVOD_D2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVOD_D2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave left half of byte elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Left half of byte elements of 'in0' and left half of byte
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ *               Note: for all MSA_ILVL_* macros, the 'left half' is the
+ *               most-significant half of each input vector.
+ */
+#define MSA_ILVL_B(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1);  \
+}
+
+#define MSA_ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVL_B(RTYPE, in0, in1, out0);                      \
+    MSA_ILVL_B(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave left half of halfword elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Left half of halfword elements of 'in0' and left half of halfword
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVL_H(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1);  \
+}
+
+#define MSA_ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVL_H(RTYPE, in0, in1, out0);                      \
+    MSA_ILVL_H(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVL_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave left half of word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Left half of word elements of 'in0' and left half of word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVL_W(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1);  \
+}
+
+#define MSA_ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVL_W(RTYPE, in0, in1, out0);                      \
+    MSA_ILVL_W(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVL_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVL_W2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave left half of double word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Left half of double word elements of 'in0' and left half of
+ *               double word elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVL_D(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvl_d((v2i64) in0, (v2i64) in1);  \
+}
+
+#define MSA_ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVL_D(RTYPE, in0, in1, out0);                      \
+    MSA_ILVL_D(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVL_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVL_D2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave right half of byte elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Right half of byte elements of 'in0' and right half of byte
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ *               Note: for all MSA_ILVR_* macros, the 'right half' is the
+ *               least-significant half of each input vector.
+ */
+#define MSA_ILVR_B(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1);  \
+}
+
+#define MSA_ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVR_B(RTYPE, in0, in1, out0);                      \
+    MSA_ILVR_B(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave right half of halfword elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Right half of halfword elements of 'in0' and right half of halfword
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVR_H(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1);  \
+}
+
+#define MSA_ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVR_H(RTYPE, in0, in1, out0);                      \
+    MSA_ILVR_H(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave right half of word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Right half of word elements of 'in0' and right half of word
+ *               elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVR_W(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1);  \
+}
+
+#define MSA_ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVR_W(RTYPE, in0, in1, out0);                      \
+    MSA_ILVR_W(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave right half of double word elements from vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Right half of double word elements of 'in0' and right half of
+ *               double word elements of 'in1' are interleaved and copied to 'out'.
+ */
+#define MSA_ILVR_D(RTYPE, in0, in1, out)                   \
+{                                                          \
+    out = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1);  \
+}
+
+#define MSA_ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                           \
+    MSA_ILVR_D(RTYPE, in0, in1, out0);                      \
+    MSA_ILVR_D(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                    out0, out1, out2, out3)                         \
+{                                                                   \
+    MSA_ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    MSA_ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+
+/* Description : Interleave both left and right half of input vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out0, out1
+ *               Return Type - as per RTYPE
+ * Details     : Right half of byte elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out0'.
+ *               Left half of byte elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out1'.
+ *               Note: each MSA_ILVRL_* macro simply applies the matching
+ *               MSA_ILVR_* and MSA_ILVL_* macros to the same operand pair.
+ */
+#define MSA_ILVRL_B2(RTYPE, in0, in1, out0, out1)  \
+{                                                  \
+    MSA_ILVR_B(RTYPE, in0, in1, out0);             \
+    MSA_ILVL_B(RTYPE, in0, in1, out1);             \
+}
+
+#define MSA_ILVRL_B4(RTYPE, in0, in1, in2, in3,    \
+                     out0, out1, out2, out3)       \
+{                                                  \
+    MSA_ILVRL_B2(RTYPE, in0, in1, out0, out1);     \
+    MSA_ILVRL_B2(RTYPE, in2, in3, out2, out3);     \
+}
+
+/* Description : Interleave both left and right half of input vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out0, out1
+ *               Return Type - as per RTYPE
+ * Details     : Right half of halfword elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out0'.
+ *               Left half of halfword elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out1'.
+ */
+#define MSA_ILVRL_H2(RTYPE, in0, in1, out0, out1)  \
+{                                                  \
+    MSA_ILVR_H(RTYPE, in0, in1, out0);             \
+    MSA_ILVL_H(RTYPE, in0, in1, out1);             \
+}
+
+#define MSA_ILVRL_H4(RTYPE, in0, in1, in2, in3,    \
+                     out0, out1, out2, out3)       \
+{                                                  \
+    MSA_ILVRL_H2(RTYPE, in0, in1, out0, out1);     \
+    MSA_ILVRL_H2(RTYPE, in2, in3, out2, out3);     \
+}
+
+/* Description : Interleave both left and right half of input vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out0, out1
+ *               Return Type - as per RTYPE
+ * Details     : Right half of word elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out0'.
+ *               Left half of word elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out1'.
+ */
+#define MSA_ILVRL_W2(RTYPE, in0, in1, out0, out1)  \
+{                                                  \
+    MSA_ILVR_W(RTYPE, in0, in1, out0);             \
+    MSA_ILVL_W(RTYPE, in0, in1, out1);             \
+}
+
+#define MSA_ILVRL_W4(RTYPE, in0, in1, in2, in3,    \
+                     out0, out1, out2, out3)       \
+{                                                  \
+    MSA_ILVRL_W2(RTYPE, in0, in1, out0, out1);     \
+    MSA_ILVRL_W2(RTYPE, in2, in3, out2, out3);     \
+}
+
+/* Description : Interleave both left and right half of input vectors.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out0, out1
+ *               Return Type - as per RTYPE
+ * Details     : Right half of double word elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out0'.
+ *               Left half of double word elements from 'in0' and 'in1' are
+ *               interleaved and stored to 'out1'.
+ */
+#define MSA_ILVRL_D2(RTYPE, in0, in1, out0, out1)  \
+{                                                  \
+    MSA_ILVR_D(RTYPE, in0, in1, out0);             \
+    MSA_ILVL_D(RTYPE, in0, in1, out1);             \
+}
+
+#define MSA_ILVRL_D4(RTYPE, in0, in1, in2, in3,    \
+                     out0, out1, out2, out3)       \
+{                                                  \
+    MSA_ILVRL_D2(RTYPE, in0, in1, out0, out1);     \
+    MSA_ILVRL_D2(RTYPE, in2, in3, out2, out3);     \
+}
+
+/* Description : Indexed byte elements are replicated to all elements in
+ *               output vector.
+ * Arguments   : Inputs  - in, idx
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : 'idx' element value from 'in' vector is replicated to all
+ *               elements in 'out' vector.
+ *               Valid index range for byte operation is 0-15.
+ */
+#define MSA_SPLATI_B(RTYPE, in, idx, out)                 \
+{                                                         \
+    out = (RTYPE) __msa_splati_b((v16i8) in, idx);        \
+}
+
+#define MSA_SPLATI_B2(RTYPE, in, idx0, idx1, out0, out1)  \
+{                                                         \
+    MSA_SPLATI_B(RTYPE, in, idx0, out0);                  \
+    MSA_SPLATI_B(RTYPE, in, idx1, out1);                  \
+}
+
+#define MSA_SPLATI_B4(RTYPE, in, idx0, idx1, idx2, idx3,  \
+                      out0, out1, out2, out3)             \
+{                                                         \
+    MSA_SPLATI_B2(RTYPE, in, idx0, idx1, out0, out1);     \
+    MSA_SPLATI_B2(RTYPE, in, idx2, idx3, out2, out3);     \
+}
+
+/* Description : Indexed halfword elements are replicated to all elements in
+ *               output vector.
+ * Arguments   : Inputs  - in, idx
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : 'idx' element value from 'in' vector is replicated to all
+ *               elements in 'out' vector.
+ *               Valid index range for halfword operation is 0-7.
+ */
+#define MSA_SPLATI_H(RTYPE, in, idx, out)                 \
+{                                                         \
+    out = (RTYPE) __msa_splati_h((v8i16) in, idx);        \
+}
+
+#define MSA_SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1)  \
+{                                                         \
+    MSA_SPLATI_H(RTYPE, in, idx0, out0);                  \
+    MSA_SPLATI_H(RTYPE, in, idx1, out1);                  \
+}
+
+#define MSA_SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3,  \
+                      out0, out1, out2, out3)             \
+{                                                         \
+    MSA_SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1);     \
+    MSA_SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3);     \
+}
+
+/* Description : Indexed word elements are replicated to all elements in
+ *               output vector.
+ * Arguments   : Inputs  - in, idx
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : 'idx' element value from 'in' vector is replicated to all
+ *               elements in 'out' vector.
+ *               Valid index range for word operation is 0-3.
+ */
+#define MSA_SPLATI_W(RTYPE, in, idx, out)                 \
+{                                                         \
+    out = (RTYPE) __msa_splati_w((v4i32) in, idx);        \
+}
+
+#define MSA_SPLATI_W2(RTYPE, in, idx0, idx1, out0, out1)  \
+{                                                         \
+    MSA_SPLATI_W(RTYPE, in, idx0, out0);                  \
+    MSA_SPLATI_W(RTYPE, in, idx1, out1);                  \
+}
+
+#define MSA_SPLATI_W4(RTYPE, in, idx0, idx1, idx2, idx3,  \
+                      out0, out1, out2, out3)             \
+{                                                         \
+    MSA_SPLATI_W2(RTYPE, in, idx0, idx1, out0, out1);     \
+    MSA_SPLATI_W2(RTYPE, in, idx2, idx3, out2, out3);     \
+}
+
+/* Description : Pack even byte elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even byte elements of 'in0' are copied to the left half of
+ *               'out' & even byte elements of 'in1' are copied to the right
+ *               half of 'out'.
+ */
+#define MSA_PCKEV_B(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckev_b((v16i8) in0, (v16i8) in1);   \
+}
+
+#define MSA_PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKEV_B(RTYPE, in0, in1, out0);                      \
+    MSA_PCKEV_B(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack even halfword elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even halfword elements of 'in0' are copied to the left half of
+ *               'out' & even halfword elements of 'in1' are copied to the right
+ *               half of 'out'.
+ */
+#define MSA_PCKEV_H(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckev_h((v8i16) in0, (v8i16) in1);   \
+}
+
+#define MSA_PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKEV_H(RTYPE, in0, in1, out0);                      \
+    MSA_PCKEV_H(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack even word elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even word elements of 'in0' are copied to the left half of
+ *               'out' & even word elements of 'in1' are copied to the right
+ *               half of 'out'.
+ */
+#define MSA_PCKEV_W(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckev_w((v4i32) in0, (v4i32) in1);   \
+}
+
+#define MSA_PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKEV_W(RTYPE, in0, in1, out0);                      \
+    MSA_PCKEV_W(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKEV_W4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKEV_W2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack even double word elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Even double word elements of 'in0' are copied to the left
+ *               half of 'out' & even double word elements of 'in1' are
+ *               copied to the right half of 'out'.
+ */
+#define MSA_PCKEV_D(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckev_d((v2i64) in0, (v2i64) in1);   \
+}
+
+#define MSA_PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKEV_D(RTYPE, in0, in1, out0);                      \
+    MSA_PCKEV_D(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack odd byte elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd byte elements of 'in0' are copied to the left half of
+ *               'out' & odd byte elements of 'in1' are copied to the right
+ *               half of 'out'.
+ */
+#define MSA_PCKOD_B(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckod_b((v16i8) in0, (v16i8) in1);   \
+}
+
+#define MSA_PCKOD_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKOD_B(RTYPE, in0, in1, out0);                      \
+    MSA_PCKOD_B(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKOD_B4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKOD_B2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKOD_B2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack odd halfword elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd halfword elements of 'in0' are copied to the left half of
+ *               'out' & odd halfword elements of 'in1' are copied to the right
+ *               half of 'out'.
+ */
+#define MSA_PCKOD_H(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckod_h((v8i16) in0, (v8i16) in1);   \
+}
+
+#define MSA_PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKOD_H(RTYPE, in0, in1, out0);                      \
+    MSA_PCKOD_H(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKOD_H4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKOD_H2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack odd word elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd word elements of 'in0' are copied to the left half of
+ *               'out' & odd word elements of 'in1' are copied to the right
+ *               half of 'out'.
+ */
+#define MSA_PCKOD_W(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckod_w((v4i32) in0, (v4i32) in1);   \
+}
+
+#define MSA_PCKOD_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKOD_W(RTYPE, in0, in1, out0);                      \
+    MSA_PCKOD_W(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKOD_W4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKOD_W2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKOD_W2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Pack odd double word elements of vector pairs.
+ * Arguments   : Inputs  - in0, in1
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Odd double word elements of 'in0' are copied to the left
+ *               half of 'out' & odd double word elements of 'in1' are
+ *               copied to the right half of 'out'.
+ */
+#define MSA_PCKOD_D(RTYPE, in0, in1, out)                    \
+{                                                            \
+    out = (RTYPE) __msa_pckod_d((v2i64) in0, (v2i64) in1);   \
+}
+
+#define MSA_PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+{                                                            \
+    MSA_PCKOD_D(RTYPE, in0, in1, out0);                      \
+    MSA_PCKOD_D(RTYPE, in2, in3, out1);                      \
+}
+
+#define MSA_PCKOD_D4(RTYPE, in0, in1, in2, in3, in4, in5,    \
+                     in6, in7, out0, out1, out2, out3)       \
+{                                                            \
+    MSA_PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1);     \
+    MSA_PCKOD_D2(RTYPE, in4, in5, in6, in7, out2, out3);     \
+}
+
+/* Description : Dot product of unsigned byte vector elements.
+ * Arguments   : Inputs  - mult
+ *                         cnst
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Unsigned byte elements from 'mult' are multiplied with
+ *               unsigned byte elements from 'cnst' producing a result
+ *               twice the size of input i.e. unsigned halfword.
+ *               Then this multiplication results of adjacent odd-even elements
+ *               are added together and stored to the out vector.
+ */
+#define MSA_DOTP_UB(RTYPE, mult, cnst, out)                         \
+{                                                                   \
+    out = (RTYPE) __msa_dotp_u_h((v16u8) mult, (v16u8) cnst);       \
+}
+
+#define MSA_DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+{                                                                   \
+    MSA_DOTP_UB(RTYPE, mult0, cnst0, out0);                         \
+    MSA_DOTP_UB(RTYPE, mult1, cnst1, out1);                         \
+}
+
+#define MSA_DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3,             \
+                     cnst0, cnst1, cnst2, cnst3,                    \
+                     out0, out1, out2, out3)                        \
+{                                                                   \
+    MSA_DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);    \
+    MSA_DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);    \
+}
+
+/* Description : Dot product of signed byte vector elements.
+ * Arguments   : Inputs  - mult
+ *                         cnst
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Signed byte elements from 'mult' are multiplied with
+ *               signed byte elements from 'cnst' producing a result
+ *               twice the size of input i.e. signed halfword.
+ *               Then this multiplication results of adjacent odd-even elements
+ *               are added together and stored to the out vector.
+ */
+#define MSA_DOTP_SB(RTYPE, mult, cnst, out)                         \
+{                                                                   \
+    out = (RTYPE) __msa_dotp_s_h((v16i8) mult, (v16i8) cnst);       \
+}
+
+#define MSA_DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+{                                                                   \
+    MSA_DOTP_SB(RTYPE, mult0, cnst0, out0);                         \
+    MSA_DOTP_SB(RTYPE, mult1, cnst1, out1);                         \
+}
+
+#define MSA_DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3,             \
+                     cnst0, cnst1, cnst2, cnst3,                    \
+                     out0, out1, out2, out3)                        \
+{                                                                   \
+    MSA_DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);    \
+    MSA_DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);    \
+}
+
+/* Description : Dot product of unsigned halfword vector elements.
+ * Arguments   : Inputs  - mult
+ *                         cnst
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Unsigned halfword elements from 'mult' are multiplied with
+ *               unsigned halfword elements from 'cnst' producing a result
+ *               twice the size of input i.e. unsigned word.
+ *               Then this multiplication results of adjacent odd-even elements
+ *               are added together and stored to the out vector.
+ */
+#define MSA_DOTP_UH(RTYPE, mult, cnst, out)                         \
+{                                                                   \
+    out = (RTYPE) __msa_dotp_u_w((v8u16) mult, (v8u16) cnst);       \
+}
+
+#define MSA_DOTP_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+{                                                                   \
+    MSA_DOTP_UH(RTYPE, mult0, cnst0, out0);                         \
+    MSA_DOTP_UH(RTYPE, mult1, cnst1, out1);                         \
+}
+
+#define MSA_DOTP_UH4(RTYPE, mult0, mult1, mult2, mult3,             \
+                     cnst0, cnst1, cnst2, cnst3,                    \
+                     out0, out1, out2, out3)                        \
+{                                                                   \
+    MSA_DOTP_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);    \
+    MSA_DOTP_UH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);    \
+}
+
+/* Description : Dot product of signed halfword vector elements.
+ * Arguments   : Inputs  - mult
+ *                         cnst
+ *               Outputs - out
+ *               Return Type - as per RTYPE
+ * Details     : Signed halfword elements from 'mult' are multiplied with
+ *               signed halfword elements from 'cnst' producing a result
+ *               twice the size of input i.e. signed word.
+ *               Then this multiplication results of adjacent odd-even elements
+ *               are added together and stored to the out vector.
+ */
+#define MSA_DOTP_SH(RTYPE, mult, cnst, out)                         \
+{                                                                   \
+    out = (RTYPE) __msa_dotp_s_w((v8i16) mult, (v8i16) cnst);       \
+}
+
+#define MSA_DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+{                                                                   \
+    MSA_DOTP_SH(RTYPE, mult0, cnst0, out0)                          \
+    MSA_DOTP_SH(RTYPE, mult1, cnst1, out1)                          \
+}
+
+#define MSA_DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3,             \
+                     cnst0, cnst1, cnst2, cnst3,                    \
+                     out0, out1, out2, out3)                        \
+{                                                                   \
+    MSA_DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);    \
+    MSA_DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);    \
+}
+
+/* Description : Dot product & addition of unsigned byte vector elements.
+ * Arguments   : Inputs  - mult, cnst
+ *               Outputs - out (accumulator, read-modify-write)
+ *               Return Type - as per RTYPE
+ * Details     : Each pair of adjacent unsigned byte products of 'mult'
+ *               and 'cnst' elements (results are halfword wide) is
+ *               summed and accumulated into the out vector.
+ */
+#define MSA_DPADD_UB(RTYPE, mult, cnst, out)                          \
+{                                                                     \
+    out = (RTYPE) __msa_dpadd_u_h((v8u16) out,                        \
+                                  (v16u8) mult, (v16u8) cnst);        \
+}
+
+/* Two-vector variant of MSA_DPADD_UB. */
+#define MSA_DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
+{                                                                     \
+    MSA_DPADD_UB(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_UB(RTYPE, mult1, cnst1, out1);                          \
+}
+
+/* Four-vector variant of MSA_DPADD_UB. */
+#define MSA_DPADD_UB4(RTYPE, mult0, mult1, mult2, mult3,              \
+                      cnst0, cnst1, cnst2, cnst3,                     \
+                      out0, out1, out2, out3)                         \
+{                                                                     \
+    MSA_DPADD_UB(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_UB(RTYPE, mult1, cnst1, out1);                          \
+    MSA_DPADD_UB(RTYPE, mult2, cnst2, out2);                          \
+    MSA_DPADD_UB(RTYPE, mult3, cnst3, out3);                          \
+}
+
+/* Description : Dot product & addition of signed byte vector elements.
+ * Arguments   : Inputs  - mult, cnst
+ *               Outputs - out (accumulator, read-modify-write)
+ *               Return Type - as per RTYPE
+ * Details     : Each pair of adjacent signed byte products of 'mult'
+ *               and 'cnst' elements (results are halfword wide) is
+ *               summed and accumulated into the out vector.
+ */
+#define MSA_DPADD_SB(RTYPE, mult, cnst, out)                          \
+{                                                                     \
+    out = (RTYPE) __msa_dpadd_s_h((v8i16) out,                        \
+                                  (v16i8) mult, (v16i8) cnst);        \
+}
+
+/* Two-vector variant of MSA_DPADD_SB. */
+#define MSA_DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
+{                                                                     \
+    MSA_DPADD_SB(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_SB(RTYPE, mult1, cnst1, out1);                          \
+}
+
+/* Four-vector variant of MSA_DPADD_SB. */
+#define MSA_DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3,              \
+                      cnst0, cnst1, cnst2, cnst3,                     \
+                      out0, out1, out2, out3)                         \
+{                                                                     \
+    MSA_DPADD_SB(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_SB(RTYPE, mult1, cnst1, out1);                          \
+    MSA_DPADD_SB(RTYPE, mult2, cnst2, out2);                          \
+    MSA_DPADD_SB(RTYPE, mult3, cnst3, out3);                          \
+}
+
+/* Description : Dot product & addition of unsigned halfword vector elements.
+ * Arguments   : Inputs  - mult, cnst
+ *               Outputs - out (accumulator, read-modify-write)
+ *               Return Type - as per RTYPE
+ * Details     : Each pair of adjacent unsigned halfword products of
+ *               'mult' and 'cnst' elements (results are word wide) is
+ *               summed and accumulated into the out vector.
+ */
+#define MSA_DPADD_UH(RTYPE, mult, cnst, out)                          \
+{                                                                     \
+    out = (RTYPE) __msa_dpadd_u_w((v4u32) out,                        \
+                                  (v8u16) mult, (v8u16) cnst);        \
+}
+
+/* Two-vector variant of MSA_DPADD_UH. */
+#define MSA_DPADD_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
+{                                                                     \
+    MSA_DPADD_UH(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_UH(RTYPE, mult1, cnst1, out1);                          \
+}
+
+/* Four-vector variant of MSA_DPADD_UH. */
+#define MSA_DPADD_UH4(RTYPE, mult0, mult1, mult2, mult3,              \
+                      cnst0, cnst1, cnst2, cnst3,                     \
+                      out0, out1, out2, out3)                         \
+{                                                                     \
+    MSA_DPADD_UH(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_UH(RTYPE, mult1, cnst1, out1);                          \
+    MSA_DPADD_UH(RTYPE, mult2, cnst2, out2);                          \
+    MSA_DPADD_UH(RTYPE, mult3, cnst3, out3);                          \
+}
+
+/* Description : Dot product & addition of signed halfword vector elements.
+ * Arguments   : Inputs  - mult, cnst
+ *               Outputs - out (accumulator, read-modify-write)
+ *               Return Type - as per RTYPE
+ * Details     : Each pair of adjacent signed halfword products of
+ *               'mult' and 'cnst' elements (results are word wide) is
+ *               summed and accumulated into the out vector.
+ */
+#define MSA_DPADD_SH(RTYPE, mult, cnst, out)                          \
+{                                                                     \
+    out = (RTYPE) __msa_dpadd_s_w((v4i32) out,                        \
+                                  (v8i16) mult, (v8i16) cnst);        \
+}
+
+/* Two-vector variant of MSA_DPADD_SH. */
+#define MSA_DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
+{                                                                     \
+    MSA_DPADD_SH(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_SH(RTYPE, mult1, cnst1, out1);                          \
+}
+
+/* Four-vector variant of MSA_DPADD_SH. */
+#define MSA_DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3,              \
+                      cnst0, cnst1, cnst2, cnst3,                     \
+                      out0, out1, out2, out3)                         \
+{                                                                     \
+    MSA_DPADD_SH(RTYPE, mult0, cnst0, out0);                          \
+    MSA_DPADD_SH(RTYPE, mult1, cnst1, out1);                          \
+    MSA_DPADD_SH(RTYPE, mult2, cnst2, out2);                          \
+    MSA_DPADD_SH(RTYPE, mult3, cnst3, out3);                          \
+}
+
+/* Description : Clip all signed halfword elements of input vector
+ *               between 'min' and 'max'.
+ *               in = (in < min) ? min : ((in > max) ? max : in).
+ * Arguments   : Inputs  - in    (input vector)
+ *                       - min   (lower threshold vector)
+ *                       - max   (upper threshold vector)
+ *               Outputs - in    (clipped, in place)
+ * Note        : type of 'in' must be v8i16.
+ */
+#define MSA_CLIP_SH(in, min, max)                 \
+{                                                 \
+    in = __msa_max_s_h((v8i16) in, (v8i16) min);  \
+    in = __msa_min_s_h((v8i16) in, (v8i16) max);  \
+}
+
+/* Description : Clip all signed halfword elements of input vector
+ *               between 0 and 255.
+ * Arguments   : Inputs  - in    (input vector)
+ *               Outputs - in    (clipped, in place)
+ * Note        : type of 'in' must be v8i16.
+ */
+#define MSA_CLIP_SH_0_255(in)                   \
+{                                               \
+    in = __msa_maxi_s_h((v8i16) in, 0);         \
+    in = (v8i16) __msa_sat_u_h((v8u16) in, 7);  \
+}
+
+#define MSA_CLIP_SH2_0_255(in0, in1)            \
+{                                               \
+    MSA_CLIP_SH_0_255(in0);                     \
+    MSA_CLIP_SH_0_255(in1);                     \
+}
+
+#define MSA_CLIP_SH4_0_255(in0, in1, in2, in3)  \
+{                                               \
+    MSA_CLIP_SH_0_255(in0);                     \
+    MSA_CLIP_SH_0_255(in1);                     \
+    MSA_CLIP_SH_0_255(in2);                     \
+    MSA_CLIP_SH_0_255(in3);                     \
+}
+
+#define MSA_CLIP_SH8_0_255(in0, in1, in2, in3,  \
+                           in4, in5, in6, in7)  \
+{                                               \
+    MSA_CLIP_SH4_0_255(in0, in1, in2, in3);     \
+    MSA_CLIP_SH4_0_255(in4, in5, in6, in7);     \
+}
+
+/* Description : Clip all signed word elements of input vector
+ *               between 0 and 255.
+ * Arguments   : Inputs  - in    (input vector)
+ *               Outputs - in    (clipped, in place)
+ * Note        : type of 'in' must be v4i32.
+ */
+#define MSA_CLIP_SW_0_255(in)                   \
+{                                               \
+    in = __msa_maxi_s_w((v4i32) in, 0);         \
+    in = (v4i32) __msa_sat_u_w((v4u32) in, 7);  \
+}
+
+#define MSA_CLIP_SW2_0_255(in0, in1)            \
+{                                               \
+    MSA_CLIP_SW_0_255(in0);                     \
+    MSA_CLIP_SW_0_255(in1);                     \
+}
+
+#define MSA_CLIP_SW4_0_255(in0, in1, in2, in3)  \
+{                                               \
+    MSA_CLIP_SW_0_255(in0);                     \
+    MSA_CLIP_SW_0_255(in1);                     \
+    MSA_CLIP_SW_0_255(in2);                     \
+    MSA_CLIP_SW_0_255(in3);                     \
+}
+
+#define MSA_CLIP_SW8_0_255(in0, in1, in2, in3,  \
+                           in4, in5, in6, in7)  \
+{                                               \
+    MSA_CLIP_SW4_0_255(in0, in1, in2, in3);     \
+    MSA_CLIP_SW4_0_255(in4, in5, in6, in7);     \
+}
+
+/* Description : Horizontal addition of 16 unsigned byte elements.
+ *               All 16 unsigned byte elements of 'in' are summed and the
+ *               resulting integer is written to 'sum_m'.
+ * Arguments   : Inputs  - in       (unsigned byte vector)
+ *               Outputs - sum_m    (u32 sum)
+ *               Return Type - unsigned word
+ */
+#define MSA_HADD_UB_U32(in, sum_m)                        \
+{                                                         \
+    v8u16 hsum_m;                                         \
+    v4u32 wsum_m;                                         \
+    v2u64 dsum_m, dtmp_m;                                 \
+                                                          \
+    hsum_m = __msa_hadd_u_h((v16u8) in, (v16u8) in);      \
+    wsum_m = __msa_hadd_u_w(hsum_m, hsum_m);              \
+    dsum_m = __msa_hadd_u_d(wsum_m, wsum_m);              \
+    dtmp_m = (v2u64) __msa_splati_d((v2i64) dsum_m, 1);   \
+    dsum_m += dtmp_m;                                     \
+    sum_m = __msa_copy_u_w((v4i32) dsum_m, 0);            \
+}
+
+/* Description : Horizontal addition of 8 unsigned halfword elements.
+ *               All 8 unsigned halfword elements of 'in' are summed and
+ *               the resulting integer is written to 'sum_m'.
+ * Arguments   : Inputs  - in       (unsigned halfword vector)
+ *               Outputs - sum_m    (u32 sum)
+ *               Return Type - unsigned word
+ */
+#define MSA_HADD_UH_U32(in, sum_m)                        \
+{                                                         \
+    v4u32 wsum_m;                                         \
+    v2u64 dsum_m, dtmp_m;                                 \
+                                                          \
+    wsum_m = __msa_hadd_u_w((v8u16) in, (v8u16) in);      \
+    dsum_m = __msa_hadd_u_d(wsum_m, wsum_m);              \
+    dtmp_m = (v2u64) __msa_splati_d((v2i64) dsum_m, 1);   \
+    dsum_m += dtmp_m;                                     \
+    sum_m = __msa_copy_u_w((v4i32) dsum_m, 0);            \
+}
+
+/* Description : Horizontal addition of 4 unsigned word elements.
+ *               All 4 unsigned word elements of 'in' are summed and the
+ *               resulting integer is written to 'sum_m'.
+ * Arguments   : Inputs  - in       (unsigned word vector)
+ *               Outputs - sum_m    (u32 sum)
+ *               Return Type - unsigned word
+ */
+#define MSA_HADD_UW_U32(in, sum_m)                        \
+{                                                         \
+    v2u64 dsum_m, dtmp_m;                                 \
+                                                          \
+    dsum_m = __msa_hadd_u_d((v4u32) in, (v4u32) in);      \
+    dtmp_m = (v2u64) __msa_splati_d((v2i64) dsum_m, 1);   \
+    dsum_m += dtmp_m;                                     \
+    sum_m = __msa_copy_u_w((v4i32) dsum_m, 0);            \
+}
+
+/* Description : Horizontal addition of 16 signed byte elements.
+ *               All 16 signed byte elements of 'in' are summed and the
+ *               resulting integer is written to 'sum_m'.
+ * Arguments   : Inputs  - in       (signed byte vector)
+ *               Outputs - sum_m    (i32 sum)
+ *               Return Type - signed word
+ */
+#define MSA_HADD_SB_S32(in, sum_m)                        \
+{                                                         \
+    v8i16 hsum_m;                                         \
+    v4i32 wsum_m;                                         \
+    v2i64 dsum_m, dtmp_m;                                 \
+                                                          \
+    hsum_m = __msa_hadd_s_h((v16i8) in, (v16i8) in);      \
+    wsum_m = __msa_hadd_s_w(hsum_m, hsum_m);              \
+    dsum_m = __msa_hadd_s_d(wsum_m, wsum_m);              \
+    dtmp_m = __msa_splati_d(dsum_m, 1);                   \
+    dsum_m += dtmp_m;                                     \
+    sum_m = __msa_copy_s_w((v4i32) dsum_m, 0);            \
+}
+
+/* Description : Horizontal addition of 8 signed halfword elements.
+ *               All 8 signed halfword elements of 'in' are summed and
+ *               the resulting integer is written to 'sum_m'.
+ * Arguments   : Inputs  - in       (signed halfword vector)
+ *               Outputs - sum_m    (i32 sum)
+ *               Return Type - signed word
+ */
+#define MSA_HADD_SH_S32(in, sum_m)                        \
+{                                                         \
+    v4i32 wsum_m;                                         \
+    v2i64 dsum_m, dtmp_m;                                 \
+                                                          \
+    wsum_m = __msa_hadd_s_w((v8i16) in, (v8i16) in);      \
+    dsum_m = __msa_hadd_s_d(wsum_m, wsum_m);              \
+    dtmp_m = __msa_splati_d(dsum_m, 1);                   \
+    dsum_m += dtmp_m;                                     \
+    sum_m = __msa_copy_s_w((v4i32) dsum_m, 0);            \
+}
+
+/* Description : Horizontal addition of 4 signed word elements.
+ *               All 4 signed word elements of 'in' are summed and the
+ *               resulting integer is written to 'sum_m'.
+ * Arguments   : Inputs  - in       (signed word vector)
+ *               Outputs - sum_m    (i32 sum)
+ *               Return Type - signed word
+ */
+#define MSA_HADD_SW_S32(in, sum_m)                        \
+{                                                         \
+    v2i64 dsum_m, dtmp_m;                                 \
+                                                          \
+    dsum_m = __msa_hadd_s_d((v4i32) in, (v4i32) in);      \
+    dtmp_m = __msa_splati_d(dsum_m, 1);                   \
+    dsum_m += dtmp_m;                                     \
+    sum_m = __msa_copy_s_w((v4i32) dsum_m, 0);            \
+}
+
+/* Description : Saturate each unsigned halfword element of 'in' to the
+ *               maximum unsigned value representable in (sat_val+1) bits.
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v8u16
+ */
+#define MSA_SAT_UH(in, sat_val)                   \
+{                                                 \
+    in = __msa_sat_u_h(in, sat_val);              \
+}
+
+#define MSA_SAT_UH2(in0, in1, sat_val)            \
+{                                                 \
+    MSA_SAT_UH(in0, sat_val);                     \
+    MSA_SAT_UH(in1, sat_val);                     \
+}
+
+#define MSA_SAT_UH4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_UH(in0, sat_val);                     \
+    MSA_SAT_UH(in1, sat_val);                     \
+    MSA_SAT_UH(in2, sat_val);                     \
+    MSA_SAT_UH(in3, sat_val);                     \
+}
+
+/* Description : Saturate each signed halfword element of 'in' to the
+ *               maximum signed value representable in (sat_val+1) bits.
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v8i16
+ */
+#define MSA_SAT_SH(in, sat_val)                   \
+{                                                 \
+    in = __msa_sat_s_h(in, sat_val);              \
+}
+
+#define MSA_SAT_SH2(in0, in1, sat_val)            \
+{                                                 \
+    MSA_SAT_SH(in0, sat_val);                     \
+    MSA_SAT_SH(in1, sat_val);                     \
+}
+
+#define MSA_SAT_SH4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_SH(in0, sat_val);                     \
+    MSA_SAT_SH(in1, sat_val);                     \
+    MSA_SAT_SH(in2, sat_val);                     \
+    MSA_SAT_SH(in3, sat_val);                     \
+}
+
+/* Description : Saturate each unsigned word element of 'in' to the
+ *               maximum unsigned value representable in (sat_val+1) bits.
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v4u32
+ */
+#define MSA_SAT_UW(in, sat_val)                   \
+{                                                 \
+    in = __msa_sat_u_w(in, sat_val);              \
+}
+
+#define MSA_SAT_UW2(in0, in1, sat_val)            \
+{                                                 \
+    MSA_SAT_UW(in0, sat_val);                     \
+    MSA_SAT_UW(in1, sat_val);                     \
+}
+
+#define MSA_SAT_UW4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_UW(in0, sat_val);                     \
+    MSA_SAT_UW(in1, sat_val);                     \
+    MSA_SAT_UW(in2, sat_val);                     \
+    MSA_SAT_UW(in3, sat_val);                     \
+}
+
+/* Description : Saturate each signed word element of 'in' to the
+ *               maximum signed value representable in (sat_val+1) bits.
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v4i32
+ */
+#define MSA_SAT_SW(in, sat_val)                   \
+{                                                 \
+    in = __msa_sat_s_w(in, sat_val);              \
+}
+
+#define MSA_SAT_SW2(in0, in1, sat_val)            \
+{                                                 \
+    MSA_SAT_SW(in0, sat_val);                     \
+    MSA_SAT_SW(in1, sat_val);                     \
+}
+
+#define MSA_SAT_SW4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_SW(in0, sat_val);                     \
+    MSA_SAT_SW(in1, sat_val);                     \
+    MSA_SAT_SW(in2, sat_val);                     \
+    MSA_SAT_SW(in3, sat_val);                     \
+}
+
+/* Description : Logically xor each byte element with immediate 128.
+ * Arguments   : Inputs  - in
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each unsigned byte element of 'in' is xor'ed with 128
+ *               and the result is stored back to 'in'.
+ */
+#define MSA_XORI_B_128(RTYPE, in)                 \
+{                                                 \
+    in = (RTYPE) __msa_xori_b((v16u8) in, 128);   \
+}
+
+#define MSA_XORI_B2_128(RTYPE, in0, in1)  \
+{                                         \
+    MSA_XORI_B_128(RTYPE, in0);           \
+    MSA_XORI_B_128(RTYPE, in1);           \
+}
+
+#define MSA_XORI_B4_128(RTYPE, in0, in1, in2, in3)  \
+{                                                   \
+    MSA_XORI_B_128(RTYPE, in0);                     \
+    MSA_XORI_B_128(RTYPE, in1);                     \
+    MSA_XORI_B_128(RTYPE, in2);                     \
+    MSA_XORI_B_128(RTYPE, in3);                     \
+}
+
+/* Description : Shift right logical each byte element of vector 'in'
+ *               by the bit count held in the corresponding element of
+ *               vector 'shift'; the result is written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRL_B(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_b((v16i8) in, (v16i8) shift);  \
+}
+
+#define MSA_SRL_B2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_B(RTYPE, in0, shift);           \
+    MSA_SRL_B(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_B4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_B(RTYPE, in0, shift);                     \
+    MSA_SRL_B(RTYPE, in1, shift);                     \
+    MSA_SRL_B(RTYPE, in2, shift);                     \
+    MSA_SRL_B(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical each halfword element of vector 'in'
+ *               by the bit count held in the corresponding element of
+ *               vector 'shift'; the result is written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRL_H(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_h((v8i16) in, (v8i16) shift);  \
+}
+
+#define MSA_SRL_H2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_H(RTYPE, in0, shift);           \
+    MSA_SRL_H(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_H(RTYPE, in0, shift);                     \
+    MSA_SRL_H(RTYPE, in1, shift);                     \
+    MSA_SRL_H(RTYPE, in2, shift);                     \
+    MSA_SRL_H(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical each word element of vector 'in'
+ *               by the bit count held in the corresponding element of
+ *               vector 'shift'; the result is written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRL_W(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_w((v4i32) in, (v4i32) shift);  \
+}
+
+#define MSA_SRL_W2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_W(RTYPE, in0, shift);           \
+    MSA_SRL_W(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_W4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_W(RTYPE, in0, shift);                     \
+    MSA_SRL_W(RTYPE, in1, shift);                     \
+    MSA_SRL_W(RTYPE, in2, shift);                     \
+    MSA_SRL_W(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical each double word element of vector
+ *               'in' by the bit count held in the corresponding element
+ *               of vector 'shift'; the result is written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRL_D(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_d((v2i64) in, (v2i64) shift);  \
+}
+
+#define MSA_SRL_D2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_D(RTYPE, in0, shift);           \
+    MSA_SRL_D(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_D4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_D(RTYPE, in0, shift);                     \
+    MSA_SRL_D(RTYPE, in1, shift);                     \
+    MSA_SRL_D(RTYPE, in2, shift);                     \
+    MSA_SRL_D(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical rounded each byte element of vector
+ *               'in' by the bit count held in the corresponding element
+ *               of vector 'shift'; the result is written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRLR_B(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srlr_b((v16i8) in, (v16i8) shift);  \
+}
+
+#define MSA_SRLR_B2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRLR_B(RTYPE, in0, shift);           \
+    MSA_SRLR_B(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRLR_B4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRLR_B(RTYPE, in0, shift);                     \
+    MSA_SRLR_B(RTYPE, in1, shift);                     \
+    MSA_SRLR_B(RTYPE, in2, shift);                     \
+    MSA_SRLR_B(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical rounded each halfword element of
+ *               vector 'in' by the bit count held in the corresponding
+ *               element of vector 'shift'; result written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRLR_H(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srlr_h((v8i16) in, (v8i16) shift);  \
+}
+
+#define MSA_SRLR_H2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRLR_H(RTYPE, in0, shift);           \
+    MSA_SRLR_H(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRLR_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRLR_H(RTYPE, in0, shift);                     \
+    MSA_SRLR_H(RTYPE, in1, shift);                     \
+    MSA_SRLR_H(RTYPE, in2, shift);                     \
+    MSA_SRLR_H(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical rounded each word element of vector
+ *               'in' by the bit count held in the corresponding element
+ *               of vector 'shift'; the result is written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRLR_W(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srlr_w((v4i32) in, (v4i32) shift);  \
+}
+
+#define MSA_SRLR_W2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRLR_W(RTYPE, in0, shift);           \
+    MSA_SRLR_W(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRLR_W4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRLR_W(RTYPE, in0, shift);                     \
+    MSA_SRLR_W(RTYPE, in1, shift);                     \
+    MSA_SRLR_W(RTYPE, in2, shift);                     \
+    MSA_SRLR_W(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right logical rounded each double word element of
+ *               vector 'in' by the bit count held in the corresponding
+ *               element of vector 'shift'; result written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRLR_D(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srlr_d((v2i64) in, (v2i64) shift);  \
+}
+
+#define MSA_SRLR_D2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRLR_D(RTYPE, in0, shift);           \
+    MSA_SRLR_D(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRLR_D4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRLR_D(RTYPE, in0, shift);                     \
+    MSA_SRLR_D(RTYPE, in1, shift);                     \
+    MSA_SRLR_D(RTYPE, in2, shift);                     \
+    MSA_SRLR_D(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right arithmetic rounded each byte element of
+ *               vector 'in' by the bit count held in the corresponding
+ *               element of vector 'shift'; result written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRAR_B(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srar_b((v16i8) in, (v16i8) shift);  \
+}
+
+#define MSA_SRAR_B2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRAR_B(RTYPE, in0, shift);           \
+    MSA_SRAR_B(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRAR_B4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRAR_B(RTYPE, in0, shift);                     \
+    MSA_SRAR_B(RTYPE, in1, shift);                     \
+    MSA_SRAR_B(RTYPE, in2, shift);                     \
+    MSA_SRAR_B(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right arithmetic rounded each halfword element of
+ *               vector 'in' by the bit count held in the corresponding
+ *               element of vector 'shift'; result written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRAR_H(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srar_h((v8i16) in, (v8i16) shift);  \
+}
+
+#define MSA_SRAR_H2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRAR_H(RTYPE, in0, shift);           \
+    MSA_SRAR_H(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRAR_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRAR_H(RTYPE, in0, shift);                     \
+    MSA_SRAR_H(RTYPE, in1, shift);                     \
+    MSA_SRAR_H(RTYPE, in2, shift);                     \
+    MSA_SRAR_H(RTYPE, in3, shift);                     \
+}
+
+/* Description : Shift right arithmetic rounded each word element of
+ *               vector 'in' by the bit count held in the corresponding
+ *               element of vector 'shift'; result written back to 'in'.
+ * Arguments   : Inputs  - in, shift (both vectors)
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ */
+#define MSA_SRAR_W(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srar_w((v4i32) in, (v4i32) shift);  \
+}
+
+#define MSA_SRAR_W2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRAR_W(RTYPE, in0, shift);           \
+    MSA_SRAR_W(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRAR_W4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRAR_W2(RTYPE, in0, in1, shift);               \
+    MSA_SRAR_W2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all double word elements
+ *               of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by number of bits respective element holds in
+ *               vector 'shift' and result is in place written to 'in'.
+ *               Here, 'shift' is a vector passed in.
+ */
+#define MSA_SRAR_D(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srar_d((v2i64) in, (v2i64) shift);  \
+}
+
+#define MSA_SRAR_D2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRAR_D(RTYPE, in0, shift);           \
+    MSA_SRAR_D(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRAR_D4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRAR_D2(RTYPE, in0, in1, shift);               \
+    MSA_SRAR_D2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all byte elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by 'shift' bits and the result is in place written
+ *               to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ *               Note: the __msa_srari_* builtins take a scalar immediate as
+ *               their second operand, so 'shift' must not be cast to a
+ *               vector type.
+ */
+#define MSA_SRARI_B(RTYPE, in, shift)               \
+{                                                   \
+    in = (RTYPE) __msa_srari_b((v16i8) in, shift);  \
+}
+
+#define MSA_SRARI_B2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_B(RTYPE, in0, shift);           \
+    MSA_SRARI_B(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_B4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_B2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_B2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all halfword elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by 'shift' bits and the result is in place written
+ *               to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ */
+#define MSA_SRARI_H(RTYPE, in, shift)               \
+{                                                   \
+    in = (RTYPE) __msa_srari_h((v8i16) in, shift);  \
+}
+
+#define MSA_SRARI_H2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_H(RTYPE, in0, shift);           \
+    MSA_SRARI_H(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_H2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_H2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all word elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by 'shift' bits and the result is in place written
+ *               to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ */
+#define MSA_SRARI_W(RTYPE, in, shift)               \
+{                                                   \
+    in = (RTYPE) __msa_srari_w((v4i32) in, shift);  \
+}
+
+#define MSA_SRARI_W2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_W(RTYPE, in0, shift);           \
+    MSA_SRARI_W(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_W4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_W2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_W2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all double word elements
+ *               of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by 'shift' bits and the result is in place written
+ *               to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ */
+#define MSA_SRARI_D(RTYPE, in, shift)               \
+{                                                   \
+    in = (RTYPE) __msa_srari_d((v2i64) in, shift);  \
+}
+
+#define MSA_SRARI_D2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_D(RTYPE, in0, shift);           \
+    MSA_SRARI_D(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_D4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_D2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_D2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Transposes input 4x4 byte block.
+ * Arguments   : Inputs  - in0, in1, in2, in3      (input 4x4 byte block)
+ *               Outputs - out0, out1, out2, out3  (output 4x4 byte block)
+ *               Return Type - RTYPE
+ * Details     : Rows are interleaved pairwise, then once more, so that each
+ *               output vector carries one transposed row in its low 4 bytes
+ *               (out1..out3 are produced by sliding with a zero vector, so
+ *               their upper bytes are zero).
+ */
+#define MSA_TRANSPOSE4x4_B(RTYPE, in0, in1, in2, in3,         \
+                           out0, out1, out2, out3)            \
+{                                                             \
+    v16i8 zero_m = { 0 };                                     \
+                                                              \
+    MSA_ILVR_B2(RTYPE, in2, in0, in3, in1, out2, out3);       \
+    out0 = (RTYPE) __msa_ilvr_b((v16i8) out3, (v16i8) out2);  \
+    out1 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out0, 4);     \
+    out2 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out1, 4);     \
+    out3 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out2, 4);     \
+}
+
+/* Description : Transposes input 8x4 byte block into 4x8.
+ * Arguments   : Inputs  - in0, in1, in2 ~ in7     (input 8x4 byte block)
+ *               Outputs - out0, out1, out2, out3  (output 4x8 byte block)
+ *               Return Type - RTYPE
+ * Details     : Byte then halfword interleaves gather one column of the
+ *               input per output; each output holds one 8-byte row of the
+ *               4x8 result in its low half.
+ */
+#define MSA_TRANSPOSE8x4_B(RTYPE, in0, in1, in2, in3, in4, in5,  \
+                           in6, in7, out0, out1, out2, out3)     \
+{                                                                \
+    v16i8 zero_m = { 0 };                                        \
+                                                                 \
+    MSA_ILVR_B4(RTYPE, in2, in0, in3, in1, in6, in4, in7, in5,   \
+                out0, out1, out2, out3);                         \
+    MSA_ILVR_H2(RTYPE, out2, out0, out3, out1, out2, out3);      \
+    out0 = (RTYPE) __msa_ilvr_b((v16i8) out3, (v16i8) out2);     \
+    out1 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out0, 8);        \
+    out2 = (RTYPE) __msa_ilvl_b((v16i8) out3, (v16i8) out2);     \
+    out3 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out2, 8);        \
+}
+
+/* Description : Transposes 16x4 block into 4x16 with byte elements in vectors.
+ * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
+ *                         in8, in9, in10, in11, in12, in13, in14, in15
+ *               Outputs - out0, out1, out2, out3
+ *               Return Type - RTYPE
+ * Details     : The two 8-row halves are transposed as in MSA_TRANSPOSE8x4_B
+ *               and then combined doubleword-wise, so out0..out3 are full
+ *               16-byte rows of the 4x16 result.
+ */
+#define MSA_TRANSPOSE16x4_B(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                            in8, in9, in10, in11, in12, in13, in14, in15,   \
+                            out0, out1, out2, out3)                         \
+{                                                                           \
+    v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
+                                                                            \
+    MSA_ILVR_B4(RTYPE, in2, in0, in3, in1, in6, in4, in7, in5,              \
+                out0, out1, out2, out3);                                    \
+    MSA_ILVR_H2(RTYPE, out2, out0, out3, out1, out2, out3);                 \
+    MSA_ILVRL_B2(v2i64, out3, out2, tmp0_m, tmp1_m);                        \
+                                                                            \
+    MSA_ILVR_B4(RTYPE, in10, in8, in11, in9, in14, in12, in15, in13,        \
+                out0, out1, out2, out3);                                    \
+    MSA_ILVR_H2(RTYPE, out2, out0, out3, out1, out2, out3);                 \
+    MSA_ILVRL_B2(v2i64, out3, out2, tmp2_m, tmp3_m);                        \
+                                                                            \
+    MSA_ILVRL_D4(RTYPE, tmp2_m, tmp0_m, tmp3_m, tmp1_m,                     \
+                 out0, out1, out2, out3);                                   \
+}
+
+/* Description : Transposes input 8x8 byte block.
+ * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
+ *                         (input 8x8 byte block)
+ *               Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+ *                         (output 8x8 byte block)
+ *               Return Type - RTYPE
+ * Details     : Successive byte and word interleaves place two transposed
+ *               rows per vector; odd outputs are extracted with sldi from a
+ *               zero vector, so each output holds one row in its low 8 bytes.
+ */
+#define MSA_TRANSPOSE8x8_B(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,   \
+                           out0, out1, out2, out3, out4, out5, out6, out7)  \
+{                                                                           \
+    v16i8 zero_m = {0};                                                     \
+                                                                            \
+    MSA_ILVR_B4(RTYPE, in2, in0, in3, in1, in6, in4, in7, in5,              \
+            out0, out1, out2, out3);                                        \
+    MSA_ILVRL_B4(RTYPE, out1, out0, out3, out2, out4, out5, out6, out7);    \
+    MSA_ILVRL_W4(RTYPE, out6, out4, out7, out5, out0, out2, out4, out6);    \
+    out1 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out0, 8);                   \
+    out3 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out2, 8);                   \
+    out5 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out4, 8);                   \
+    out7 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out6, 8);                   \
+}
+
+/* Description : Transposes 16x8 block into 8x16 with byte elements in vectors.
+ * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
+ *                         in8, in9, in10, in11, in12, in13, in14, in15
+ *               Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+ *               Return Type - RTYPE
+ * Details     : Doubleword interleaves pair rows i and i+8, then even/odd
+ *               byte, halfword and word interleaves complete the transpose;
+ *               out0..out7 are full 16-byte rows of the 8x16 result.
+ */
+#define MSA_TRANSPOSE16x8_B(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,   \
+                            in8, in9, in10, in11, in12, in13, in14, in15,    \
+                            out0, out1, out2, out3, out4, out5, out6, out7)  \
+{                                                                            \
+    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                    \
+                                                                             \
+    MSA_ILVEV_D4(RTYPE, in8, in0, in9, in1, in10, in2, in11, in3,            \
+                 out7, out6, out5, out4);                                    \
+    MSA_ILVEV_D4(RTYPE, in12, in4, in13, in5, in14, in6, in15, in7,          \
+                 out3, out2, out1, out0);                                    \
+                                                                             \
+    tmp0_m =  __msa_ilvev_b((v16i8) out6, (v16i8) out7);                     \
+    tmp1_m =  __msa_ilvod_b((v16i8) out6, (v16i8) out7);                     \
+    out6 = (RTYPE) __msa_ilvev_b((v16i8) out4, (v16i8) out5);                \
+    out5 = (RTYPE) __msa_ilvod_b((v16i8) out4, (v16i8) out5);                \
+    tmp2_m = __msa_ilvev_b((v16i8) out2, (v16i8) out3);                      \
+    tmp3_m = __msa_ilvod_b((v16i8) out2, (v16i8) out3);                      \
+    out2 = (RTYPE) __msa_ilvev_b((v16i8) out0, (v16i8) out1);                \
+    out1 = (RTYPE) __msa_ilvod_b((v16i8) out0, (v16i8) out1);                \
+                                                                             \
+    MSA_ILVEV_H2(RTYPE, out6, tmp0_m, out2, tmp2_m, out3, out7);             \
+    out0 = (RTYPE) __msa_ilvev_w((v4i32) out7, (v4i32) out3);                \
+    out4 = (RTYPE) __msa_ilvod_w((v4i32) out7, (v4i32) out3);                \
+                                                                             \
+    MSA_ILVOD_H2(RTYPE, out6, tmp0_m, out2, tmp2_m, out3, out7);             \
+    out2 = (RTYPE) __msa_ilvev_w((v4i32) out7, (v4i32) out3);                \
+    out6 = (RTYPE) __msa_ilvod_w((v4i32) out7, (v4i32) out3);                \
+                                                                             \
+    MSA_ILVOD_H2(v16i8, out5, tmp1_m, out1, tmp3_m, tmp0_m, tmp2_m);         \
+    out3 = (RTYPE) __msa_ilvev_w((v4i32) tmp2_m, (v4i32) tmp0_m);            \
+    out7 = (RTYPE) __msa_ilvod_w((v4i32) tmp2_m, (v4i32) tmp0_m);            \
+                                                                             \
+    MSA_ILVEV_H2(v16i8, out5, tmp1_m, out1, tmp3_m, tmp0_m, tmp2_m);         \
+    out1 = (RTYPE) __msa_ilvev_w((v4i32) tmp2_m, (v4i32) tmp0_m);            \
+    out5 = (RTYPE) __msa_ilvod_w((v4i32) tmp2_m, (v4i32) tmp0_m);            \
+}
+
+/* Description : Transposes 4x4 block with half word elements in vectors.
+ * Arguments   : Inputs  - in0, in1, in2, in3
+ *               Outputs - out0, out1, out2, out3
+ *               Return Type - RTYPE
+ * Details     : Halfword then word interleaves; each output holds one row
+ *               of the transposed block in its low 4 halfwords.
+ */
+#define MSA_TRANSPOSE4x4_H(RTYPE, in0, in1, in2, in3,         \
+                           out0, out1, out2, out3)            \
+{                                                             \
+    MSA_ILVR_H2(RTYPE, in1, in0, in3, in2, out1, out3);       \
+    MSA_ILVRL_W2(RTYPE, out3, out1, out0, out2);              \
+    MSA_ILVL_D2(RTYPE, out0, out0, out2, out2, out1, out3);   \
+}
+
+/* Description : Transposes 8x4 block with half word elements in vectors.
+ * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
+ *               Outputs - out0, out1, out2, out3
+ *               Return Type - RTYPE
+ * Details     : Interleaves both 4-row halves and packs even/odd
+ *               doublewords; out0..out3 are full 8-halfword rows of the
+ *               4x8 result.
+ */
+#define MSA_TRANSPOSE8x4_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,   \
+                           out0, out1, out2, out3)                          \
+{                                                                           \
+    v8i16 s0_m, s1_m;                                                       \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
+                                                                            \
+    MSA_ILVR_H2(v8i16, in6, in4, in7, in5, s0_m, s1_m);                     \
+    MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp0_m, tmp1_m);                        \
+    MSA_ILVR_H2(v8i16, in2, in0, in3, in1, s0_m, s1_m);                     \
+    MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp2_m, tmp3_m);                        \
+    MSA_PCKEV_D2(RTYPE, tmp0_m, tmp2_m, tmp1_m, tmp3_m, out0, out2);        \
+    MSA_PCKOD_D2(RTYPE, tmp0_m, tmp2_m, tmp1_m, tmp3_m, out1, out3);        \
+}
+
+/* Description : Transposes 8x8 block with half word elements in vectors.
+ * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
+ *               Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+ *               Return Type - RTYPE
+ * Details     : Right/left halfword interleaves of both 4-row halves
+ *               followed by even/odd doubleword packs; out0..out7 are full
+ *               8-halfword rows of the transposed block.
+ */
+#define MSA_TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,   \
+                           out0, out1, out2, out3, out4, out5, out6, out7)  \
+{                                                                           \
+    v8i16 s0_m, s1_m;                                                       \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
+    v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                   \
+                                                                            \
+    MSA_ILVR_H2(v8i16, in6, in4, in7, in5, s0_m, s1_m);                     \
+    MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp0_m, tmp1_m);                        \
+    MSA_ILVL_H2(v8i16, in6, in4, in7, in5, s0_m, s1_m);                     \
+    MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp2_m, tmp3_m);                        \
+    MSA_ILVR_H2(v8i16, in2, in0, in3, in1, s0_m, s1_m);                     \
+    MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp4_m, tmp5_m);                        \
+    MSA_ILVL_H2(v8i16, in2, in0, in3, in1, s0_m, s1_m);                     \
+    MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp6_m, tmp7_m);                        \
+    MSA_PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m,     \
+             tmp3_m, tmp7_m, out0, out2, out4, out6);                       \
+    MSA_PCKOD_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m,     \
+             tmp3_m, tmp7_m, out1, out3, out5, out7);                       \
+}
+
+#endif /* _MSA_MACROS_H */
--- /dev/null
+++ b/codec/common/mips/copy_mb_msa.c
@@ -1,0 +1,80 @@
+/*!
+ * \copy
+ *     Copyright (C) 2020 Loongson Technology Co. Ltd.
+ *     Contributed by Gu Xiwei(guxiwei-hf@loongson.cn)
+ *     All rights reserved.
+ *
+ *     Redistribution and use in source and binary forms, with or without
+ *     modification, are permitted provided that the following conditions
+ *     are met:
+ *
+ *        * Redistributions of source code must retain the above copyright
+ *          notice, this list of conditions and the following disclaimer.
+ *
+ *        * Redistributions in binary form must reproduce the above copyright
+ *          notice, this list of conditions and the following disclaimer in
+ *          the documentation and/or other materials provided with the
+ *          distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *     POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * \file    copy_mb_msa.c
+ *
+ * \brief   MIPS MSA optimizations
+ *
+ * \date    14/05/2020 Created
+ *
+ *************************************************************************************
+ */
+
+#include <stdint.h>
+#include "msa_macros.h"
+
+// Copy an 8x8 byte block from pSrc to pDst, two rows per iteration.
+// Only the low 8 bytes (one doubleword) of each loaded vector are stored.
+void WelsCopy8x8_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc,
+                     int32_t iStrideS) {
+    v16u8 row0, row1;
+    int32_t iCnt = 4;
+    while (iCnt--) {
+        MSA_LD_V2(v16u8, pSrc, iStrideS, row0, row1);
+        MSA_ST_D(row0, 0, pDst);
+        MSA_ST_D(row1, 0, pDst + iStrideD);
+        pSrc += iStrideS << 1;
+        pDst += iStrideD << 1;
+    }
+}
+
+// Copy an 8x16 byte block as two stacked 8x8 copies.
+void WelsCopy8x16_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc,
+                      int32_t iStrideS) {
+    WelsCopy8x8_msa(pDst, iStrideD, pSrc, iStrideS);
+    pDst += 8 * iStrideD;
+    pSrc += 8 * iStrideS;
+    WelsCopy8x8_msa(pDst, iStrideD, pSrc, iStrideS);
+}
+
+// Copy a 16x8 byte block from pSrc to pDst, two full vectors per iteration.
+void WelsCopy16x8_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc,
+                      int32_t iStrideS) {
+    v16u8 row0, row1;
+    int32_t iCnt;
+    for (iCnt = 4; iCnt > 0; iCnt--) {
+        MSA_LD_V2(v16u8, pSrc, iStrideS, row0, row1);
+        MSA_ST_V2(v16u8, row0, row1, pDst, iStrideD);
+        pSrc += iStrideS << 1;
+        pDst += iStrideD << 1;
+    }
+}
+
+// Copy a 16x16 byte block as two stacked 16x8 copies.
+// (Fixed: removed stray ';' after the closing brace -- an empty file-scope
+// declaration, invalid in ISO C before C23 and a -pedantic warning.)
+void WelsCopy16x16_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc,
+                       int32_t iStrideS) {
+    WelsCopy16x8_msa(pDst, iStrideD, pSrc, iStrideS);
+    WelsCopy16x8_msa(pDst + 8 * iStrideD, iStrideD,
+                     pSrc + 8 * iStrideS, iStrideS);
+}
--- /dev/null
+++ b/codec/common/mips/deblock_msa.c
@@ -1,0 +1,1024 @@
+/*!
+ * \copy
+ *     Copyright (C) 2019 Loongson Technology Co. Ltd.
+ *     Contributed by Gu Xiwei(guxiwei-hf@loongson.cn)
+ *     All rights reserved.
+ *
+ *     Redistribution and use in source and binary forms, with or without
+ *     modification, are permitted provided that the following conditions
+ *     are met:
+ *
+ *        * Redistributions of source code must retain the above copyright
+ *          notice, this list of conditions and the following disclaimer.
+ *
+ *        * Redistributions in binary form must reproduce the above copyright
+ *          notice, this list of conditions and the following disclaimer in
+ *          the documentation and/or other materials provided with the
+ *          distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *     POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * \file    deblock_msa.c
+ *
+ * \brief   MIPS MSA optimizations
+ *
+ * \date    15/05/2020 Created
+ *
+ *************************************************************************************
+ */
+
+#include <stdint.h>
+#include "msa_macros.h"
+
+/*!
+ * \brief   Luma deblocking filter for bS < 4 ('V' variant: the six sample
+ *          rows p2..q2 are consecutive rows separated by iStride; the whole
+ *          16-pixel edge is filtered in one pass).
+ * \param   pPix    points at the q0 row; p2..p0 are loaded from
+ *                  pPix - 3*iStride .. pPix - iStride
+ * \param   iStride row stride in bytes
+ * \param   iAlpha  threshold compared against |p0 - q0|
+ * \param   iBeta   threshold compared against the neighbour differences
+ * \param   pTc     4 tc values, one per 4-pixel group along the edge
+ */
+void DeblockLumaLt4V_msa(uint8_t *pPix, int32_t iStride, int32_t iAlpha,
+                         int32_t iBeta, int8_t *pTc) {
+    v16u8 p0, p1, p2, q0, q1, q2;
+    v16i8 iTc, negiTc, negTc, flags, f;
+    v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, q1_l, q1_r, q2_l, q2_r;
+    v8i16 tc_l, tc_r, negTc_l, negTc_r;
+    v8i16 iTc_l, iTc_r, negiTc_l, negiTc_r;
+    // Use for temporary variable
+    v8i16 t0, t1, t2, t3;
+    v16u8 alpha, beta;
+    v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0;
+    v16i8 const_1_b = __msa_ldi_b(1);
+    v8i16 const_1_h = __msa_ldi_h(1);
+    v8i16 const_4_h = __msa_ldi_h(4);
+    v8i16 const_not_255_h = __msa_ldi_h(~255);
+    v16i8 zero = { 0 };
+    // Replicate each of the 4 tc values across its 4-pixel group (index i >> 2).
+    v16i8 tc = { pTc[0  >> 2], pTc[1  >> 2], pTc[2  >> 2], pTc[3  >> 2],
+                 pTc[4  >> 2], pTc[5  >> 2], pTc[6  >> 2], pTc[7  >> 2],
+                 pTc[8  >> 2], pTc[9  >> 2], pTc[10 >> 2], pTc[11 >> 2],
+                 pTc[12 >> 2], pTc[13 >> 2], pTc[14 >> 2], pTc[15 >> 2] };
+    negTc = zero - tc;
+    iTc = tc;
+
+    // Load data from pPix
+    MSA_LD_V4(v16u8, pPix - 3 * iStride, iStride, p2, p1, p0, q0);
+    MSA_LD_V2(v16u8, pPix + iStride, iStride, q1, q2);
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+
+    // Per-pixel comparison masks: 0xFF where the condition holds.
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP2P0 = __msa_asub_u_b(p2, p0);
+    bDetaQ2Q0 = __msa_asub_u_b(q2, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+    bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta);
+    bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta);
+
+    // Unsigned extend p0, p1, p2, q0, q1, q2 from 8 bits to 16 bits
+    MSA_ILVRL_B4(v8i16, zero, p0, zero, p1,
+                 p0_r, p0_l, p1_r, p1_l);
+    MSA_ILVRL_B4(v8i16, zero, p2, zero, q0,
+                 p2_r, p2_l, q0_r, q0_l);
+    MSA_ILVRL_B4(v8i16, zero, q1, zero, q2,
+                 q1_r, q1_l, q2_r, q2_l);
+    // Signed extend tc, negTc from 8 bits to 16 bits
+    flags = __msa_clt_s_b(tc, zero);
+    MSA_ILVRL_B2(v8i16, flags, tc, tc_r, tc_l);
+    flags = __msa_clt_s_b(negTc, zero);
+    MSA_ILVRL_B2(v8i16, flags, negTc, negTc_r, negTc_l);
+
+    // f: the three basic filter conditions all hold for the pixel.
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+    // iTc = tc + 1 for each of the extra conditions |p2-p0| < beta and
+    // |q2-q0| < beta that holds (combined with f).
+    flags = f & (v16i8)bDetaP2P0;
+    flags = __msa_ceq_b(flags, zero);
+    iTc += ((~flags) & const_1_b);
+    flags = f & (v16i8)bDetaQ2Q0;
+    flags = __msa_ceq_b(flags, zero);
+    iTc += ((~flags) & const_1_b);
+    negiTc = zero - iTc;
+    // Signed extend iTc, negiTc from 8 bits to 16 bits
+    flags = __msa_clt_s_b(iTc, zero);
+    MSA_ILVRL_B2(v8i16, flags, iTc, iTc_r, iTc_l);
+    flags = __msa_clt_s_b(negiTc, zero);
+    MSA_ILVRL_B2(v8i16, flags, negiTc, negiTc_r, negiTc_l);
+
+    // Calculate the left part
+    // p1
+    t0 = (p2_l + ((p0_l + q0_l + const_1_h) >> 1) - (p1_l << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_l, t0);
+    t0 = __msa_min_s_h(tc_l, t0);
+    t1 = p1_l + t0;
+    // q1
+    t0 = (q2_l + ((p0_l + q0_l + const_1_h) >> 1) - (q1_l << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_l, t0);
+    t0 = __msa_min_s_h(tc_l, t0);
+    t2 = q1_l + t0;
+    // iDeta, clipped to [-iTc, iTc]
+    t0 = (((q0_l - p0_l) << 2) + (p1_l - q1_l) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negiTc_l, t0);
+    t0 = __msa_min_s_h(iTc_l, t0);
+    p1_l = t1;
+    q1_l = t2;
+    // p0: saturate p0 + iDeta to [0, 255]; when out of range the result is
+    // 0xFFFF (packs to 255) for positive overflow, 0 for negative.
+    t1 = p0_l + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0: same saturation for q0 - iDeta.
+    t1 = q0_l - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    // Calculate the right part
+    // p1
+    t0 = (p2_r + ((p0_r + q0_r + const_1_h) >> 1) - (p1_r << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_r, t0);
+    t0 = __msa_min_s_h(tc_r, t0);
+    t1 = p1_r + t0;
+    // q1
+    t0 = (q2_r + ((p0_r + q0_r + const_1_h) >> 1) - (q1_r << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_r, t0);
+    t0 = __msa_min_s_h(tc_r, t0);
+    t2 = q1_r + t0;
+    // iDeta
+    t0 = (((q0_r - p0_r) << 2) + (p1_r - q1_r) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negiTc_r, t0);
+    t0 = __msa_min_s_h(iTc_r, t0);
+    p1_r = t1;
+    q1_r = t2;
+    // p0
+    t1 = p0_r + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_r - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    // Combined left and right; select filtered vs. original per pixel.
+    MSA_PCKEV_B4(v8i16, p1_l, p1_r, p0_l, p0_r, q0_l, q0_r, q1_l, q1_r,
+                 t0, t1, t2, t3);
+    // Filtering is enabled only where tc >= 0 and all basic conditions hold.
+    flags = (v16i8)__msa_cle_s_b(zero, tc);
+    flags &= f;
+    p0 = (v16u8)(((v16i8)t1 & flags) + (p0 & (~flags)));
+    q0 = (v16u8)(((v16i8)t2 & flags) + (q0 & (~flags)));
+    // Using t1, t2 as temporary flags
+    t1 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaP2P0, zero))));
+    p1 = (v16u8)(t0 & t1) + (p1 & (v16u8)(~t1));
+    t2 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaQ2Q0, zero))));
+    q1 = (v16u8)(t3 & t2) + (q1 & (v16u8)(~t2));
+
+    // Store data to pPix
+    MSA_ST_V4(v16u8, p1, p0, q0, q1, pPix - 2 * iStride, iStride);
+}
+
+// DeblockLumaEq4V_msa: H.264 strong ("Eq4") luma deblock filter applied
+// across a horizontal edge (vertical filtering direction) for 16 pixel
+// columns at once.  pPix points at the first pixel of row q0; rows
+// p3..p0 lie above it and q0..q3 below, iStride bytes apart.  iAlpha and
+// iBeta are the alpha/beta clipping thresholds (uint8_t range).
+void DeblockLumaEq4V_msa(uint8_t *pPix, int32_t iStride, int32_t iAlpha,
+                         int32_t iBeta) {
+    v16u8 p0, p1, p2, p3, q0, q1, q2, q3;
+    v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p3_l, p3_r,
+          q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r;
+    v8i16 t0, t1, t2, t0_con1;
+    v8i16 s0, s1, s2, s0_con1;
+    v16u8 alpha, beta;
+    v16u8 iDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0;
+    // Condition mask
+    v16u8 mask0, mask1;
+    v16i8 const_2_b = __msa_ldi_b(2);
+    v8i16 const_2_h = __msa_ldi_h(2);
+    v8i16 const_4_h = __msa_ldi_h(4);
+    v16i8 zero = { 0 };
+
+    // Load data from pPix
+    MSA_LD_V8(v16u8, pPix - 4 * iStride, iStride, p3, p2, p1, p0,
+              q0, q1, q2, q3);
+    // iAlpha and beta are uint8_t type
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+
+    // iDetaP0Q0 is not bool type
+    iDetaP0Q0 = __msa_asub_u_b(p0, q0);
+
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP2P0 = __msa_asub_u_b(p2, p0);
+    bDetaQ2Q0 = __msa_asub_u_b(q2, q0);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+    bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta);
+    bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta);
+
+    // Unsigned extend p0, p1, p2, p3, q0, q1, q2, q3 from 8 bits to 16 bits
+    MSA_ILVRL_B4(v8i16, zero, p0, zero, p1,
+                 p0_r, p0_l, p1_r, p1_l);
+    MSA_ILVRL_B4(v8i16, zero, p2, zero, p3,
+                 p2_r, p2_l, p3_r, p3_l);
+    MSA_ILVRL_B4(v8i16, zero, q0, zero, q1,
+                 q0_r, q0_l, q1_r, q1_l);
+    MSA_ILVRL_B4(v8i16, zero, q2, zero, q3,
+                 q2_r, q2_l, q3_r, q3_l);
+
+    // Calculate condition mask
+    // (iDetaP0Q0 < iAlpha) && bDetaP1P0 && bDetaQ1Q0
+    mask0 = (v16u8)__msa_clt_u_b(iDetaP0Q0, alpha);
+    mask0 &= bDetaP1P0;
+    mask0 &= bDetaQ1Q0;
+    // iDetaP0Q0 < ((iAlpha >> 2) + 2)
+    mask1 = (v16u8)((alpha >> 2) + const_2_b);
+    mask1 = (v16u8)__msa_clt_u_b(iDetaP0Q0, mask1);
+
+    // The "_l"/"_r" suffixes are the high/low 8 columns produced by the
+    // zero-extending interleaves above; each half is filtered in 16 bits.
+    // Calculate the left part
+    // p0
+    t0 = (p2_l + (p1_l << 1) + (p0_l << 1) + (q0_l << 1) + q1_l + const_4_h) >> 3;
+    // p1
+    t1 = (p2_l + p1_l + p0_l + q0_l + const_2_h) >> 2;
+    // p2
+    t2 = ((p3_l << 1) + p2_l + (p2_l << 1) + p1_l + p0_l + q0_l + const_4_h) >> 3;
+    // p0 condition 1
+    t0_con1 = ((p1_l << 1) + p0_l + q1_l + const_2_h) >> 2;
+    // q0
+    s0 = (p1_l + (p0_l << 1) + (q0_l << 1) + (q1_l << 1) + q2_l + const_4_h) >> 3;
+    // q1
+    s1 = (p0_l + q0_l + q1_l + q2_l + const_2_h) >> 2;
+    // q2
+    s2 = ((q3_l << 1) + q2_l + (q2_l << 1) + q1_l + q0_l + p0_l + const_4_h) >> 3;
+    // q0 condition 1
+    s0_con1 = ((q1_l << 1) + q0_l + p1_l + const_2_h) >> 2;
+    // Move back
+    p0_l = t0;
+    p1_l = t1;
+    p2_l = t2;
+    q0_l = s0;
+    q1_l = s1;
+    q2_l = s2;
+    // Use p3_l, q3_l as tmp
+    p3_l = t0_con1;
+    q3_l = s0_con1;
+
+    // Calculate the right part
+    // p0
+    t0 = (p2_r + (p1_r << 1) + (p0_r << 1) + (q0_r << 1) + q1_r + const_4_h) >> 3;
+    // p1
+    t1 = (p2_r + p1_r + p0_r + q0_r + const_2_h) >> 2;
+    // p2
+    t2 = ((p3_r << 1) + p2_r + (p2_r << 1) + p1_r + p0_r + q0_r + const_4_h) >> 3;
+    // p0 condition 1
+    t0_con1 = ((p1_r << 1) + p0_r + q1_r + const_2_h) >> 2;
+    // q0
+    s0 = (p1_r + (p0_r << 1) + (q0_r << 1) + (q1_r << 1) + q2_r + const_4_h) >> 3;
+    // q1
+    s1 = (p0_r + q0_r + q1_r + q2_r + const_2_h) >> 2;
+    // q2
+    s2 = ((q3_r << 1) + q2_r + (q2_r << 1) + q1_r + q0_r + p0_r + const_4_h) >> 3;
+    // q0 condition 1
+    s0_con1 = ((q1_r << 1) + q0_r + p1_r + const_2_h) >> 2;
+    // Move back
+    p0_r = t0;
+    p1_r = t1;
+    p2_r = t2;
+    q0_r = s0;
+    q1_r = s1;
+    q2_r = s2;
+    // Use p3_r, q3_r as tmp
+    p3_r = t0_con1;
+    q3_r = s0_con1;
+
+    // Combined left and right
+    MSA_PCKEV_B4(v8i16, p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r,
+                 t0, t1, t2, s0);
+    MSA_PCKEV_B4(v8i16, q1_l, q1_r, q2_l, q2_r, p3_l, p3_r, q3_l, q3_r,
+                 s1, s2, t0_con1, s0_con1);
+    // Per-pixel 3-way select (unfiltered pixels handled by the ~mask terms
+    // further below):
+    //   strong result          when mask0 && mask1 && bDetaP2P0 (resp. Q2Q0),
+    //   "condition 1" 4-tap    when mask0 && mask1 && !bDetaP2P0,
+    //   "condition 1" 4-tap    when mask0 && !mask1.
+    t0 = (v8i16)(((v16u8)t0 & mask0 & mask1 & bDetaP2P0) + ((v16u8)t0_con1 &
+         mask0 & mask1 & (~bDetaP2P0)) + ((v16u8)t0_con1 & mask0 & (~mask1)));
+    t1 = (v8i16)((v16u8)t1 & mask0 & mask1 & bDetaP2P0);
+    t2 = (v8i16)((v16u8)t2 & mask0 & mask1 & bDetaP2P0);
+    s0 = (v8i16)(((v16u8)s0 & mask0 & mask1 & bDetaQ2Q0) + ((v16u8)s0_con1 &
+         mask0 & mask1 & (~bDetaQ2Q0)) + ((v16u8)s0_con1 & mask0 & (~mask1)));
+    s1 = (v8i16)((v16u8)s1 & mask0 & mask1 & bDetaQ2Q0);
+    s2 = (v8i16)((v16u8)s2 & mask0 & mask1 & bDetaQ2Q0);
+    p0 = (v16u8)t0 + (p0 & (~mask0));
+    p1 = (v16u8)t1 + (p1 & ~(mask0 & mask1 & bDetaP2P0));
+    p2 = (v16u8)t2 + (p2 & ~(mask0 & mask1 & bDetaP2P0));
+    q0 = (v16u8)s0 + (q0 & (~mask0));
+    q1 = (v16u8)s1 + (q1 & ~(mask0 & mask1 & bDetaQ2Q0));
+    q2 = (v16u8)s2 + (q2 & ~(mask0 & mask1 & bDetaQ2Q0));
+
+    // Store data to pPix
+    MSA_ST_V4(v16u8, p2, p1, p0, q0, pPix - 3 * iStride, iStride);
+    MSA_ST_V2(v16u8, q1, q2, pPix + iStride, iStride);
+}
+
+
+// DeblockLumaLt4H_msa: H.264 normal ("Lt4") luma deblock filter across a
+// vertical edge (horizontal filtering direction) for 16 rows at once.
+// pPix points at the q0 pixel of the first row; 16 rows are loaded around
+// pPix-3 and transposed so each of p2..q2 holds one sample per row.
+// pTc holds 4 tc thresholds, one per group of 4 rows (pTc[i >> 2] below);
+// rows whose tc is negative are left unfiltered (cle mask near the end).
+void DeblockLumaLt4H_msa(uint8_t* pPix, int32_t iStride, int32_t iAlpha,
+                         int32_t iBeta, int8_t* pTc) {
+    v16u8 p0, p1, p2, q0, q1, q2;
+    v16i8 iTc, negiTc, negTc, flags, f;
+    v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, q1_l, q1_r, q2_l, q2_r;
+    v8i16 tc_l, tc_r, negTc_l, negTc_r;
+    v8i16 iTc_l, iTc_r, negiTc_l, negiTc_r;
+    // Use for temporary variable
+    v8i16 t0, t1, t2, t3;
+    v16u8 alpha, beta;
+    v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0;
+    v16i8 const_1_b = __msa_ldi_b(1);
+    v8i16 const_1_h = __msa_ldi_h(1);
+    v8i16 const_4_h = __msa_ldi_h(4);
+    v8i16 const_not_255_h = __msa_ldi_h(~255);
+    v16i8 zero = { 0 };
+    v16i8 tc = { pTc[0  >> 2], pTc[1  >> 2], pTc[2  >> 2], pTc[3  >> 2],
+                 pTc[4  >> 2], pTc[5  >> 2], pTc[6  >> 2], pTc[7  >> 2],
+                 pTc[8  >> 2], pTc[9  >> 2], pTc[10 >> 2], pTc[11 >> 2],
+                 pTc[12 >> 2], pTc[13 >> 2], pTc[14 >> 2], pTc[15 >> 2] };
+    negTc = zero - tc;
+    iTc = tc;
+
+    // Load data from pPix
+    MSA_LD_V8(v8i16, pPix - 3, iStride, t0, t1, t2, t3, q1_l, q1_r, q2_l, q2_r);
+    MSA_LD_V8(v8i16, pPix + 8 * iStride - 3, iStride, p0_l, p0_r, p1_l, p1_r,
+              p2_l, p2_r, q0_l, q0_r);
+    // Transpose 16x8 to 8x16, we just need p0, p1, p2, q0, q1, q2
+    MSA_TRANSPOSE16x8_B(v16u8, t0, t1, t2, t3, q1_l, q1_r, q2_l, q2_r,
+                        p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r,
+                        p2, p1, p0, q0, q1, q2, alpha, beta);
+
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP2P0 = __msa_asub_u_b(p2, p0);
+    bDetaQ2Q0 = __msa_asub_u_b(q2, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+    bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta);
+    bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta);
+
+    // Unsigned extend p0, p1, p2, q0, q1, q2 from 8 bits to 16 bits
+    MSA_ILVRL_B4(v8i16, zero, p0, zero, p1,
+                 p0_r, p0_l, p1_r, p1_l);
+    MSA_ILVRL_B4(v8i16, zero, p2, zero, q0,
+                 p2_r, p2_l, q0_r, q0_l);
+    MSA_ILVRL_B4(v8i16, zero, q1, zero, q2,
+                 q1_r, q1_l, q2_r, q2_l);
+    // Signed extend tc, negTc from 8 bits to 16 bits
+    flags = __msa_clt_s_b(tc, zero);
+    MSA_ILVRL_B2(v8i16, flags, tc, tc_r, tc_l);
+    flags = __msa_clt_s_b(negTc, zero);
+    MSA_ILVRL_B2(v8i16, flags, negTc, negTc_r, negTc_l);
+
+    // f = basic filter condition: |p0-q0|<alpha && |p1-p0|<beta && |q1-q0|<beta
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+    // iTc = tc, +1 per pixel where f && bDetaP2P0 holds, +1 where f && bDetaQ2Q0
+    flags = f & (v16i8)bDetaP2P0;
+    flags = __msa_ceq_b(flags, zero);
+    iTc += ((~flags) & const_1_b);
+    flags = f & (v16i8)bDetaQ2Q0;
+    flags = __msa_ceq_b(flags, zero);
+    iTc += ((~flags) & const_1_b);
+    negiTc = zero - iTc;
+    // Signed extend iTc, negiTc from 8 bits to 16 bits
+    flags = __msa_clt_s_b(iTc, zero);
+    MSA_ILVRL_B2(v8i16, flags, iTc, iTc_r, iTc_l);
+    flags = __msa_clt_s_b(negiTc, zero);
+    MSA_ILVRL_B2(v8i16, flags, negiTc, negiTc_r, negiTc_l);
+
+    // Calculate the left part
+    // p1
+    t0 = (p2_l + ((p0_l + q0_l + const_1_h) >> 1) - (p1_l << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_l, t0);
+    t0 = __msa_min_s_h(tc_l, t0);
+    t1 = p1_l + t0;
+    // q1
+    t0 = (q2_l + ((p0_l + q0_l + const_1_h) >> 1) - (q1_l << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_l, t0);
+    t0 = __msa_min_s_h(tc_l, t0);
+    t2 = q1_l + t0;
+    // iDeta
+    t0 = (((q0_l - p0_l) << 2) + (p1_l - q1_l) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negiTc_l, t0);
+    t0 = __msa_min_s_h(iTc_l, t0);
+    p1_l = t1;
+    q1_l = t2;
+    // p0
+    // Saturate p0+deta to [0,255]: lanes with (t1 & ~255)==0 are in range and
+    // kept; otherwise the cle mask t3 substitutes 0x0000 for negative values
+    // and 0xFFFF for overflow (0xFF after the byte pack below).
+    t1 = p0_l + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_l - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    // Calculate the right part
+    // p1
+    t0 = (p2_r + ((p0_r + q0_r + const_1_h) >> 1) - (p1_r << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_r, t0);
+    t0 = __msa_min_s_h(tc_r, t0);
+    t1 = p1_r + t0;
+    // q1
+    t0 = (q2_r + ((p0_r + q0_r + const_1_h) >> 1) - (q1_r << 1)) >> 1;
+    t0 = __msa_max_s_h(negTc_r, t0);
+    t0 = __msa_min_s_h(tc_r, t0);
+    t2 = q1_r + t0;
+    // iDeta
+    t0 = (((q0_r - p0_r) << 2) + (p1_r - q1_r) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negiTc_r, t0);
+    t0 = __msa_min_s_h(iTc_r, t0);
+    p1_r = t1;
+    q1_r = t2;
+    // p0
+    t1 = p0_r + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_r - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    // Combined left and right
+    MSA_PCKEV_B4(v8i16, p1_l, p1_r, p0_l, p0_r, q0_l, q0_r, q1_l, q1_r,
+                 t0, t1, t2, t3);
+    // Apply results only where tc >= 0 and the basic condition f holds;
+    // p1/q1 additionally require their bDetaP2P0 / bDetaQ2Q0 side condition.
+    flags = (v16i8)__msa_cle_s_b(zero, tc);
+    flags &= f;
+    p0 = (v16u8)(((v16i8)t1 & flags) + (p0 & (~flags)));
+    q0 = (v16u8)(((v16i8)t2 & flags) + (q0 & (~flags)));
+    // Using t1, t2 as temporary flags
+    t1 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaP2P0, zero))));
+    p1 = (v16u8)(t0 & t1) + (p1 & (v16u8)(~t1));
+    t2 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaQ2Q0, zero))));
+    q1 = (v16u8)(t3 & t2) + (q1 & (v16u8)(~t2));
+
+    // Transpose back to row order (4 bytes p1 p0 q0 q1 per row)
+    MSA_ILVRL_B4(v8i16, p0, p1, q1, q0, t0, t1, t2, t3);
+    MSA_ILVRL_H4(v16u8, t2, t0, t3, t1, p1, p0, q0, q1);
+    // Store data to pPix
+    MSA_ST_W8(p1, p0, 0, 1, 2, 3, 0, 1, 2, 3, pPix - 2, iStride);
+    MSA_ST_W8(q0, q1, 0, 1, 2, 3, 0, 1, 2, 3, pPix + 8 * iStride - 2, iStride);
+}
+
+// DeblockLumaEq4H_msa: H.264 strong ("Eq4") luma deblock filter across a
+// vertical edge (horizontal filtering direction) for 16 rows at once.
+// pPix points at the q0 pixel of the first row; 16 rows are loaded around
+// pPix-4 and transposed so each of p3..q3 holds one sample per row.
+// Same select logic as DeblockLumaEq4V_msa, plus a transpose back on store.
+void DeblockLumaEq4H_msa(uint8_t *pPix, int32_t iStride, int32_t iAlpha,
+                         int32_t iBeta) {
+    v16u8 p0, p1, p2, p3, q0, q1, q2, q3;
+    v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p3_l, p3_r,
+          q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r;
+    v8i16 t0, t1, t2, t0_con1;
+    v8i16 s0, s1, s2, s0_con1;
+    v16u8 alpha, beta;
+    v16u8 iDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0;
+    // Condition mask
+    v16u8 mask0, mask1;
+    v16i8 const_2_b = __msa_ldi_b(2);
+    v8i16 const_2_h = __msa_ldi_h(2);
+    v8i16 const_4_h = __msa_ldi_h(4);
+    v16i8 zero = { 0 };
+
+    // Load data from pPix
+    MSA_LD_V8(v8i16, pPix - 4, iStride, p0_l, p0_r, p1_l, p1_r,
+              p2_l, p2_r, p3_l, p3_r);
+    MSA_LD_V8(v8i16, pPix + 8 * iStride - 4, iStride,
+              q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r);
+    // Transpose 16x8 to 8x16, we just need p0, p1, p2, p3, q0, q1, q2, q3
+    MSA_TRANSPOSE16x8_B(v16u8, p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p3_l, p3_r,
+                        q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r,
+                        p3, p2, p1, p0, q0, q1, q2, q3);
+    // iAlpha and beta are uint8_t type
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+
+    // iDetaP0Q0 is not bool type
+    iDetaP0Q0 = __msa_asub_u_b(p0, q0);
+
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP2P0 = __msa_asub_u_b(p2, p0);
+    bDetaQ2Q0 = __msa_asub_u_b(q2, q0);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+    bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta);
+    bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta);
+
+    // Unsigned extend p0, p1, p2, p3, q0, q1, q2, q3 from 8 bits to 16 bits
+    MSA_ILVRL_B4(v8i16, zero, p0, zero, p1,
+                 p0_r, p0_l, p1_r, p1_l);
+    MSA_ILVRL_B4(v8i16, zero, p2, zero, p3,
+                 p2_r, p2_l, p3_r, p3_l);
+    MSA_ILVRL_B4(v8i16, zero, q0, zero, q1,
+                 q0_r, q0_l, q1_r, q1_l);
+    MSA_ILVRL_B4(v8i16, zero, q2, zero, q3,
+                 q2_r, q2_l, q3_r, q3_l);
+
+    // Calculate condition mask
+    // (iDetaP0Q0 < iAlpha) && bDetaP1P0 && bDetaQ1Q0
+    mask0 = (v16u8)__msa_clt_u_b(iDetaP0Q0, alpha);
+    mask0 &= bDetaP1P0;
+    mask0 &= bDetaQ1Q0;
+    // iDetaP0Q0 < ((iAlpha >> 2) + 2)
+    mask1 = (v16u8)((alpha >> 2) + const_2_b);
+    mask1 = (v16u8)__msa_clt_u_b(iDetaP0Q0, mask1);
+
+    // Calculate the left part
+    // p0
+    t0 = (p2_l + (p1_l << 1) + (p0_l << 1) + (q0_l << 1) + q1_l + const_4_h) >> 3;
+    // p1
+    t1 = (p2_l + p1_l + p0_l + q0_l + const_2_h) >> 2;
+    // p2
+    t2 = ((p3_l << 1) + p2_l + (p2_l << 1) + p1_l + p0_l + q0_l + const_4_h) >> 3;
+    // p0 condition 1
+    t0_con1 = ((p1_l << 1) + p0_l + q1_l + const_2_h) >> 2;
+    // q0
+    s0 = (p1_l + (p0_l << 1) + (q0_l << 1) + (q1_l << 1) + q2_l + const_4_h) >> 3;
+    // q1
+    s1 = (p0_l + q0_l + q1_l + q2_l + const_2_h) >> 2;
+    // q2
+    s2 = ((q3_l << 1) + q2_l + (q2_l << 1) + q1_l + q0_l + p0_l + const_4_h) >> 3;
+    // q0 condition 1
+    s0_con1 = ((q1_l << 1) + q0_l + p1_l + const_2_h) >> 2;
+    // Move back
+    p0_l = t0;
+    p1_l = t1;
+    p2_l = t2;
+    q0_l = s0;
+    q1_l = s1;
+    q2_l = s2;
+    // Use p3_l, q3_l as tmp
+    p3_l = t0_con1;
+    q3_l = s0_con1;
+
+    // Calculate the right part
+    // p0
+    t0 = (p2_r + (p1_r << 1) + (p0_r << 1) + (q0_r << 1) + q1_r + const_4_h) >> 3;
+    // p1
+    t1 = (p2_r + p1_r + p0_r + q0_r + const_2_h) >> 2;
+    // p2
+    t2 = ((p3_r << 1) + p2_r + (p2_r << 1) + p1_r + p0_r + q0_r + const_4_h) >> 3;
+    // p0 condition 1
+    t0_con1 = ((p1_r << 1) + p0_r + q1_r + const_2_h) >> 2;
+    // q0
+    s0 = (p1_r + (p0_r << 1) + (q0_r << 1) + (q1_r << 1) + q2_r + const_4_h) >> 3;
+    // q1
+    s1 = (p0_r + q0_r + q1_r + q2_r + const_2_h) >> 2;
+    // q2
+    s2 = ((q3_r << 1) + q2_r + (q2_r << 1) + q1_r + q0_r + p0_r + const_4_h) >> 3;
+    // q0 condition 1
+    s0_con1 = ((q1_r << 1) + q0_r + p1_r + const_2_h) >> 2;
+    // Move back
+    p0_r = t0;
+    p1_r = t1;
+    p2_r = t2;
+    q0_r = s0;
+    q1_r = s1;
+    q2_r = s2;
+    // Use p3_r, q3_r as tmp
+    p3_r = t0_con1;
+    q3_r = s0_con1;
+
+    // Combined left and right
+    MSA_PCKEV_B4(v8i16, p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r,
+                 t0, t1, t2, s0);
+    MSA_PCKEV_B4(v8i16, q1_l, q1_r, q2_l, q2_r, p3_l, p3_r, q3_l, q3_r,
+                 s1, s2, t0_con1, s0_con1);
+    // Per-pixel 3-way select: strong result when mask0 && mask1 && bDetaP2P0
+    // (resp. Q2Q0), otherwise the "condition 1" 4-tap result where mask0
+    // holds; unfiltered pixels are re-added via the ~mask terms below.
+    t0 = (v8i16)(((v16u8)t0 & mask0 & mask1 & bDetaP2P0) + ((v16u8)t0_con1 &
+         mask0 & mask1 & (~bDetaP2P0)) + ((v16u8)t0_con1 & mask0 & (~mask1)));
+    t1 = (v8i16)((v16u8)t1 & mask0 & mask1 & bDetaP2P0);
+    t2 = (v8i16)((v16u8)t2 & mask0 & mask1 & bDetaP2P0);
+    s0 = (v8i16)(((v16u8)s0 & mask0 & mask1 & bDetaQ2Q0) + ((v16u8)s0_con1 &
+         mask0 & mask1 & (~bDetaQ2Q0)) + ((v16u8)s0_con1 & mask0 & (~mask1)));
+    s1 = (v8i16)((v16u8)s1 & mask0 & mask1 & bDetaQ2Q0);
+    s2 = (v8i16)((v16u8)s2 & mask0 & mask1 & bDetaQ2Q0);
+    p0 = (v16u8)t0 + (p0 & (~mask0));
+    p1 = (v16u8)t1 + (p1 & ~(mask0 & mask1 & bDetaP2P0));
+    p2 = (v16u8)t2 + (p2 & ~(mask0 & mask1 & bDetaP2P0));
+    q0 = (v16u8)s0 + (q0 & (~mask0));
+    q1 = (v16u8)s1 + (q1 & ~(mask0 & mask1 & bDetaQ2Q0));
+    q2 = (v16u8)s2 + (q2 & ~(mask0 & mask1 & bDetaQ2Q0));
+
+    // Re-interleave back to row order: p2..q0 are stored 4 bytes per row at
+    // pPix-3; the remaining q1/q2 pair is stored as one halfword per row at
+    // pPix+1.
+    MSA_ILVRL_B4(v8i16, p1, p2, q0, p0, t0, s0, t1, s1);
+    MSA_ILVRL_B2(v8i16, q2, q1, t2, s2);
+    MSA_ILVRL_H4(v16u8, t1, t0, s1, s0, p2, p1, p0, q0);
+    // Store data to pPix
+    MSA_ST_W8(p2, p1, 0, 1, 2, 3, 0, 1, 2, 3, pPix - 3, iStride);
+    MSA_ST_W8(p0, q0, 0, 1, 2, 3, 0, 1, 2, 3, pPix + 8 * iStride - 3, iStride);
+    MSA_ST_H8(t2, 0, 1, 2, 3, 4, 5, 6, 7, pPix + 1, iStride);
+    MSA_ST_H8(s2, 0, 1, 2, 3, 4, 5, 6, 7, pPix + 8 * iStride + 1, iStride);
+}
+
+// DeblockChromaLt4V_msa: H.264 normal ("Lt4") chroma deblock filter across
+// a horizontal edge, applied to Cb then Cr (8 pixel columns each).
+// pPixCb/pPixCr point at the q0 row of each plane.  pTc holds 4 tc values,
+// each shared by a pair of chroma pixels (pTc[i >> 1] below); pixels whose
+// tc is negative are left unfiltered.  Only p0/q0 are modified.
+void DeblockChromaLt4V_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride,
+                           int32_t iAlpha, int32_t iBeta, int8_t* pTc) {
+    v16u8 p0, p1, q0, q1;
+    v8i16 p0_e, p1_e, q0_e, q1_e;
+    v16i8 negTc, flags, f;
+    v8i16 tc_e, negTc_e;
+    // Use for temporary variable
+    v8i16 t0, t1, t2, t3;
+    v16u8 alpha, beta;
+    v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0;
+    v8i16 const_4_h = __msa_ldi_h(4);
+    v8i16 const_not_255_h = __msa_ldi_h(~255);
+    v16i8 zero = { 0 };
+    v16i8 tc = { pTc[0  >> 1], pTc[1  >> 1], pTc[2  >> 1], pTc[3  >> 1],
+                 pTc[4  >> 1], pTc[5  >> 1], pTc[6  >> 1], pTc[7  >> 1] };
+    negTc = zero - tc;
+
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+    // Signed extend tc, negTc from 8 bits to 16 bits
+    flags = __msa_clt_s_b(tc, zero);
+    MSA_ILVR_B(v8i16, flags, tc, tc_e);
+    flags = __msa_clt_s_b(negTc, zero);
+    MSA_ILVR_B(v8i16, flags, negTc, negTc_e);
+
+    // Cb
+    // Load data from pPixCb
+    MSA_LD_V4(v16u8, pPixCb - 2 * iStride, iStride, p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    // f = basic filter condition: |p0-q0|<alpha && |p1-p0|<beta && |q1-q0|<beta
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // iDeta
+    t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negTc_e, t0);
+    t0 = __msa_min_s_h(tc_e, t0);
+    // p0 (saturated to [0,255] via the const_not_255_h / cle mask trick)
+    t1 = p0_e + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_e - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    // Apply only where tc >= 0 and f holds
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    flags = (v16i8)__msa_cle_s_b(zero, tc);
+    flags &= f;
+    p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags)));
+    q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags)));
+    // Store data to pPixCb
+    MSA_ST_D(p0, 0, pPixCb - iStride);
+    MSA_ST_D(q0, 0, pPixCb);
+
+    // Cr
+    // Load data from pPixCr
+    MSA_LD_V4(v16u8, pPixCr - 2 * iStride, iStride, p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // iDeta
+    t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negTc_e, t0);
+    t0 = __msa_min_s_h(tc_e, t0);
+    // p0
+    t1 = p0_e + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_e - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    flags = (v16i8)__msa_cle_s_b(zero, tc);
+    flags &= f;
+    p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags)));
+    q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags)));
+    // Store data to pPixCr
+    MSA_ST_D(p0, 0, pPixCr - iStride);
+    MSA_ST_D(q0, 0, pPixCr);
+}
+
+// DeblockChromaEq4V_msa: H.264 strong ("Eq4") chroma deblock filter across
+// a horizontal edge, applied to Cb then Cr (8 pixel columns each).
+// pPixCb/pPixCr point at the q0 row.  Only p0/q0 are rewritten, using the
+// 4-tap ((x1 << 1) + x0 + y1 + 2) >> 2 formula, and only where the basic
+// alpha/beta conditions (mask f) hold.
+void DeblockChromaEq4V_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride,
+                           int32_t iAlpha, int32_t iBeta) {
+    v16u8 p0, p1, q0, q1;
+    v8i16 p0_e, p1_e, q0_e, q1_e;
+    v16i8 f;
+    // Use for temporary variable
+    v8i16 t0, t1;
+    v16u8 alpha, beta;
+    v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0;
+    v8i16 const_2_h = __msa_ldi_h(2);
+    v16i8 zero = { 0 };
+
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+
+    // Cb
+    // Load data from pPixCb
+    MSA_LD_V4(v16u8, pPixCb - 2 * iStride, iStride, p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    // f = basic filter condition: |p0-q0|<alpha && |p1-p0|<beta && |q1-q0|<beta
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // p0
+    p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2;
+    // q0
+    q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2;
+
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f)));
+    q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f)));
+    // Store data to pPixCb
+    MSA_ST_D(p0, 0, pPixCb - iStride);
+    MSA_ST_D(q0, 0, pPixCb);
+
+    // Cr
+    // Load data from pPixCr
+    MSA_LD_V4(v16u8, pPixCr - 2 * iStride, iStride, p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // p0
+    p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2;
+    // q0
+    q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2;
+
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f)));
+    q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f)));
+    // Store data to pPixCr
+    MSA_ST_D(p0, 0, pPixCr - iStride);
+    MSA_ST_D(q0, 0, pPixCr);
+}
+
+// DeblockChromaLt4H_msa: H.264 normal ("Lt4") chroma deblock filter across
+// a vertical edge, applied to Cb then Cr (8 rows each).  Rows are loaded
+// 4 bytes wide at pPix-2 and transposed to p1/p0/q0/q1 vectors; after
+// filtering, p0/q0 are re-interleaved and stored 2 bytes per row at
+// pPix-1.  pTc holds 4 tc values, each shared by a pair of rows
+// (pTc[i >> 1]); rows with negative tc are left unfiltered.
+void DeblockChromaLt4H_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride,
+                           int32_t iAlpha, int32_t iBeta, int8_t* pTc) {
+    v16u8 p0, p1, q0, q1;
+    v8i16 p0_e, p1_e, q0_e, q1_e;
+    v16i8 negTc, flags, f;
+    v8i16 tc_e, negTc_e;
+    // Use for temporary variable
+    v8i16 t0, t1, t2, t3;
+    v16u8 alpha, beta;
+    v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0;
+    v8i16 const_4_h = __msa_ldi_h(4);
+    v8i16 const_not_255_h = __msa_ldi_h(~255);
+    v16i8 zero = { 0 };
+    v16i8 tc = { pTc[0  >> 1], pTc[1  >> 1], pTc[2  >> 1], pTc[3  >> 1],
+                 pTc[4  >> 1], pTc[5  >> 1], pTc[6  >> 1], pTc[7  >> 1] };
+    negTc = zero - tc;
+
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+    // Signed extend tc, negTc from 8 bits to 16 bits
+    flags = __msa_clt_s_b(tc, zero);
+    MSA_ILVR_B(v8i16, flags, tc, tc_e);
+    flags = __msa_clt_s_b(negTc, zero);
+    MSA_ILVR_B(v8i16, flags, negTc, negTc_e);
+
+    // Cb
+    // Load data from pPixCb
+    MSA_LD_V8(v8i16, pPixCb - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+              t0, t1, t2, t3);
+    // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+    MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                       p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    // f = basic filter condition: |p0-q0|<alpha && |p1-p0|<beta && |q1-q0|<beta
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // iDeta
+    t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negTc_e, t0);
+    t0 = __msa_min_s_h(tc_e, t0);
+    // p0 (saturated to [0,255] via the const_not_255_h / cle mask trick)
+    t1 = p0_e + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_e - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    // Apply only where tc >= 0 and f holds
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    flags = (v16i8)__msa_cle_s_b(zero, tc);
+    flags &= f;
+    p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags)));
+    q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags)));
+    // Store data to pPixCb
+    MSA_ILVR_B(v16u8, q0, p0, p0);
+    MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCb - 1, iStride);
+
+    // Cr
+    // Load data from pPixCr
+    MSA_LD_V8(v8i16, pPixCr - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+              t0, t1, t2, t3);
+    // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+    MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                       p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // iDeta
+    t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3;
+    t0 = __msa_max_s_h(negTc_e, t0);
+    t0 = __msa_min_s_h(tc_e, t0);
+    // p0
+    t1 = p0_e + t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+    // q0
+    t1 = q0_e - t0;
+    t2 = t1 & const_not_255_h;
+    t3 = __msa_cle_s_h((v8i16)zero, t1);
+    flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+    q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    flags = (v16i8)__msa_cle_s_b(zero, tc);
+    flags &= f;
+    p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags)));
+    q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags)));
+    // Store data to pPixCr
+    MSA_ILVR_B(v16u8, q0, p0, p0);
+    MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCr - 1, iStride);
+}
+
+// DeblockChromaEq4H_msa: H.264 strong ("Eq4") chroma deblock filter across
+// a vertical edge, applied to Cb then Cr (8 rows each).  Rows are loaded
+// 4 bytes wide at pPix-2 and transposed to p1/p0/q0/q1 vectors; only
+// p0/q0 are rewritten (4-tap ((x1 << 1) + x0 + y1 + 2) >> 2 formula) where
+// the alpha/beta conditions hold, then stored 2 bytes per row at pPix-1.
+void DeblockChromaEq4H_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride,
+                           int32_t iAlpha, int32_t iBeta) {
+    v16u8 p0, p1, q0, q1;
+    v8i16 p0_e, p1_e, q0_e, q1_e;
+    v16i8 f;
+    // Use for temporary variable
+    v8i16 t0, t1, t2, t3;
+    v16u8 alpha, beta;
+    v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0;
+    v8i16 const_2_h = __msa_ldi_h(2);
+    v16i8 zero = { 0 };
+
+    alpha = (v16u8)__msa_fill_b(iAlpha);
+    beta  = (v16u8)__msa_fill_b(iBeta);
+
+    // Cb
+    // Load data from pPixCb
+    MSA_LD_V8(v8i16, pPixCb - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+              t0, t1, t2, t3);
+    // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+    MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                       p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    // f = basic filter condition: |p0-q0|<alpha && |p1-p0|<beta && |q1-q0|<beta
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // p0
+    p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2;
+    // q0
+    q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2;
+
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f)));
+    q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f)));
+    // Store data to pPixCb
+    MSA_ILVR_B(v16u8, q0, p0, p0);
+    MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCb - 1, iStride);
+
+    // Cr
+    // Load data from pPixCr
+    MSA_LD_V8(v8i16, pPixCr - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+              t0, t1, t2, t3);
+    // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+    MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                       p1, p0, q0, q1);
+
+    bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+    bDetaP1P0 = __msa_asub_u_b(p1, p0);
+    bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+    bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+    bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+    bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+    // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+    MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+                p0_e, p1_e, q0_e, q1_e);
+
+    f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+    // p0
+    p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2;
+    // q0
+    q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2;
+
+    MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+    p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f)));
+    q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f)));
+    // Store data to pPixCr
+    MSA_ILVR_B(v16u8, q0, p0, p0);
+    MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCr - 1, iStride);
+}
+
+// WelsNonZeroCount_msa: clamp each entry of the non-zero-count array to
+// 0 or 1 (x != 0 -> 1, x == 0 -> 0).  ceq yields 0xFF for zero entries;
+// adding 1 wraps 0xFF back to 0 while turning 0x00 into 1.  16 + 8 = 24
+// entries are written back.
+// NOTE(review): 32 bytes are loaded but only 24 are stored; this assumes
+// the 8 bytes past the 24-entry array are safely readable -- confirm the
+// buffer's padding at the call sites.
+void WelsNonZeroCount_msa(int8_t* pNonZeroCount) {
+    v16u8 src0, src1;
+    v16u8 zero = { 0 };
+    v16u8 const_1 = (v16u8)__msa_fill_b(0x01);
+
+    MSA_LD_V2(v16u8, pNonZeroCount, 16, src0, src1);
+    src0 = (v16u8)__msa_ceq_b((v16i8)zero, (v16i8)src0);
+    src1 = (v16u8)__msa_ceq_b((v16i8)zero, (v16i8)src1);
+    src0 += const_1;
+    src1 += const_1;
+    MSA_ST_V(v16u8, src0, pNonZeroCount);
+    MSA_ST_D(src1, 0, pNonZeroCount + 16);
+}
--- a/codec/common/targets.mk
+++ b/codec/common/targets.mk
@@ -76,6 +76,8 @@
 COMMON_OBJSMIPS_MMI += $(COMMON_ASM_MIPS_MMI_SRCS:.c=.$(OBJ))
 
 COMMON_ASM_MIPS_MSA_SRCS=\
+	$(COMMON_SRCDIR)/mips/copy_mb_msa.c\
+	$(COMMON_SRCDIR)/mips/deblock_msa.c\
 
 COMMON_OBJSMIPS_MSA += $(COMMON_ASM_MIPS_MSA_SRCS:.c=.$(OBJ))
 ifeq ($(ASM_ARCH), mips)
--- a/codec/decoder/core/src/deblocking.cpp
+++ b/codec/decoder/core/src/deblocking.cpp
@@ -1503,6 +1503,19 @@
     pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_mmi;
   }
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+  if (iCpu & WELS_CPU_MSA) {
+    pFunc->pfLumaDeblockingLT4Ver   = DeblockLumaLt4V_msa;
+    pFunc->pfLumaDeblockingEQ4Ver   = DeblockLumaEq4V_msa;
+    pFunc->pfLumaDeblockingLT4Hor   = DeblockLumaLt4H_msa;
+    pFunc->pfLumaDeblockingEQ4Hor   = DeblockLumaEq4H_msa;
+    pFunc->pfChromaDeblockingLT4Ver = DeblockChromaLt4V_msa;
+    pFunc->pfChromaDeblockingEQ4Ver = DeblockChromaEq4V_msa;
+    pFunc->pfChromaDeblockingLT4Hor = DeblockChromaLt4H_msa;
+    pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_msa;
+  }
+#endif//HAVE_MSA
 }
 
 } // namespace WelsDec
--- a/codec/encoder/core/src/deblocking.cpp
+++ b/codec/encoder/core/src/deblocking.cpp
@@ -783,6 +783,11 @@
     *pfSetNZCZero = WelsNonZeroCount_mmi;
   }
 #endif
+#if defined(HAVE_MSA)
+  if (iCpu & WELS_CPU_MSA) {
+    *pfSetNZCZero = WelsNonZeroCount_msa;
+  }
+#endif
 }
 
 void  DeblockingInit (DeblockingFunc*   pFunc,  int32_t iCpu) {
@@ -860,6 +865,19 @@
     pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_mmi;
   }
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+  if (iCpu & WELS_CPU_MSA) {
+    pFunc->pfLumaDeblockingLT4Ver   = DeblockLumaLt4V_msa;
+    pFunc->pfLumaDeblockingEQ4Ver   = DeblockLumaEq4V_msa;
+    pFunc->pfLumaDeblockingLT4Hor   = DeblockLumaLt4H_msa;
+    pFunc->pfLumaDeblockingEQ4Hor   = DeblockLumaEq4H_msa;
+    pFunc->pfChromaDeblockingLT4Ver = DeblockChromaLt4V_msa;
+    pFunc->pfChromaDeblockingEQ4Ver = DeblockChromaEq4V_msa;
+    pFunc->pfChromaDeblockingLT4Hor = DeblockChromaLt4H_msa;
+    pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_msa;
+  }
+#endif//HAVE_MSA
 }
 
 
--- a/codec/encoder/core/src/encode_mb_aux.cpp
+++ b/codec/encoder/core/src/encode_mb_aux.cpp
@@ -464,7 +464,7 @@
 void WelsInitEncodingFuncs (SWelsFuncPtrList* pFuncList, uint32_t  uiCpuFlag) {
   pFuncList->pfCopy8x8Aligned           = WelsCopy8x8_c;
   pFuncList->pfCopy16x16Aligned         =
-    pFuncList->pfCopy16x16NotAligned    = WelsCopy16x16_c;
+  pFuncList->pfCopy16x16NotAligned      = WelsCopy16x16_c;
   pFuncList->pfCopy16x8NotAligned       = WelsCopy16x8_c;
   pFuncList->pfCopy8x16Aligned          = WelsCopy8x16_c;
   pFuncList->pfCopy4x4           = WelsCopy4x4_c;
@@ -612,5 +612,16 @@
     pFuncList->pfDctFourT4              = WelsDctFourT4_mmi;
   }
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+  if (uiCpuFlag & WELS_CPU_MSA) {
+    pFuncList->pfCopy8x8Aligned         = WelsCopy8x8_msa;
+    pFuncList->pfCopy8x16Aligned        = WelsCopy8x16_msa;
+
+    pFuncList->pfCopy16x16Aligned       =
+    pFuncList->pfCopy16x16NotAligned    = WelsCopy16x16_msa;
+    pFuncList->pfCopy16x8NotAligned     = WelsCopy16x8_msa;
+  }
+#endif
 }
 }
--- a/test/api/targets.mk
+++ b/test/api/targets.mk
@@ -4,8 +4,8 @@
 API_TEST_SRCDIR=test/api
 API_TEST_CPP_SRCS=\
 	$(API_TEST_SRCDIR)/BaseDecoderTest.cpp\
-	$(API_TEST_SRCDIR)/BaseThreadDecoderTest.cpp\
 	$(API_TEST_SRCDIR)/BaseEncoderTest.cpp\
+	$(API_TEST_SRCDIR)/BaseThreadDecoderTest.cpp\
 	$(API_TEST_SRCDIR)/cpp_interface_test.cpp\
 	$(API_TEST_SRCDIR)/DataGenerator.cpp\
 	$(API_TEST_SRCDIR)/decode_api_test.cpp\
@@ -12,12 +12,12 @@
 	$(API_TEST_SRCDIR)/decode_encode_test.cpp\
 	$(API_TEST_SRCDIR)/decoder_ec_test.cpp\
 	$(API_TEST_SRCDIR)/decoder_test.cpp\
-	$(API_TEST_SRCDIR)/thread_decoder_test.cpp\
 	$(API_TEST_SRCDIR)/encode_decode_api_test.cpp\
 	$(API_TEST_SRCDIR)/encode_options_test.cpp\
 	$(API_TEST_SRCDIR)/encoder_test.cpp\
 	$(API_TEST_SRCDIR)/ltr_test.cpp\
 	$(API_TEST_SRCDIR)/simple_test.cpp\
+	$(API_TEST_SRCDIR)/thread_decoder_test.cpp\
 
 API_TEST_OBJS += $(API_TEST_CPP_SRCS:.cpp=.$(OBJ))
 
--- a/test/decoder/DecUT_Deblock.cpp
+++ b/test/decoder/DecUT_Deblock.cpp
@@ -163,3 +163,20 @@
 GENERATE_CHROMA_UT (ChromaEq4V_mmi, DeblockChromaEq4V_mmi_wrap, DeblockChromaEq4V_c_wrap, WELS_CPU_MMI, 0)
 GENERATE_CHROMA_UT (ChromaEq4H_mmi, DeblockChromaEq4H_mmi_wrap, DeblockChromaEq4H_c_wrap, WELS_CPU_MMI, 1)
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+WRAP_LUMA_FUNC (DeblockLumaEq4V_msa)
+WRAP_LUMA_FUNC (DeblockLumaEq4H_msa)
+WRAP_CHROMA_FUNC (DeblockChromaEq4V_msa)
+WRAP_CHROMA_FUNC (DeblockChromaEq4H_msa)
+
+GENERATE_LUMA_UT (LumaLt4V_msa, DeblockLumaLt4V_msa, DeblockLumaLt4V_c, WELS_CPU_MSA, 0)
+GENERATE_LUMA_UT (LumaLt4H_msa, DeblockLumaLt4H_msa, DeblockLumaLt4H_c, WELS_CPU_MSA, 1)
+GENERATE_LUMA_UT (LumaEq4V_msa, DeblockLumaEq4V_msa_wrap, DeblockLumaEq4V_c_wrap, WELS_CPU_MSA, 0)
+GENERATE_LUMA_UT (LumaEq4H_msa, DeblockLumaEq4H_msa_wrap, DeblockLumaEq4H_c_wrap, WELS_CPU_MSA, 1)
+
+GENERATE_CHROMA_UT (ChromaLt4V_msa, DeblockChromaLt4V_msa, DeblockChromaLt4V_c, WELS_CPU_MSA, 0)
+GENERATE_CHROMA_UT (ChromaLt4H_msa, DeblockChromaLt4H_msa, DeblockChromaLt4H_c, WELS_CPU_MSA, 1)
+GENERATE_CHROMA_UT (ChromaEq4V_msa, DeblockChromaEq4V_msa_wrap, DeblockChromaEq4V_c_wrap, WELS_CPU_MSA, 0)
+GENERATE_CHROMA_UT (ChromaEq4H_msa, DeblockChromaEq4H_msa_wrap, DeblockChromaEq4H_c_wrap, WELS_CPU_MSA, 1)
+#endif//HAVE_MSA
--- a/test/decoder/DecUT_IdctResAddPred.cpp
+++ b/test/decoder/DecUT_IdctResAddPred.cpp
@@ -204,3 +204,7 @@
 #if defined(HAVE_NEON_AARCH64)
 GENERATE_SETNONZEROCOUNT (WelsNonZeroCount_AArch64_neon, WELS_CPU_NEON)
 #endif
+
+#if defined(HAVE_MSA)
+GENERATE_SETNONZEROCOUNT (WelsNonZeroCount_msa, WELS_CPU_MSA)
+#endif
--- a/test/encoder/EncUT_EncoderMbAux.cpp
+++ b/test/encoder/EncUT_EncoderMbAux.cpp
@@ -332,6 +332,12 @@
 GENERATE_UT_FOR_COPY (16, 16, WelsCopy16x16NotAligned_mmi);
 GENERATE_UT_FOR_COPY (16, 16, WelsCopy16x16_mmi);
 #endif
+#ifdef HAVE_MSA
+GENERATE_UT_FOR_COPY (8, 8, WelsCopy8x8_msa);
+GENERATE_UT_FOR_COPY (8, 16, WelsCopy8x16_msa);
+GENERATE_UT_FOR_COPY (16, 8, WelsCopy16x8_msa);
+GENERATE_UT_FOR_COPY (16, 16, WelsCopy16x16_msa);
+#endif
 
 namespace {