diff --git a/dnn/training_tf2/dump_lpcnet.py b/dnn/training_tf2/dump_lpcnet.py
index 730ecb7509bd2608acbcf9ea18d04658ac0622df..9dcdba47052c075225322c432e6f7bde937bc74e 100755
--- a/dnn/training_tf2/dump_lpcnet.py
+++ b/dnn/training_tf2/dump_lpcnet.py
@@ -80,7 +80,7 @@ def printSparseVector(f, A, name):
             qblock = AQ[j*4:(j+1)*4, i*8:(i+1)*8]
             if np.sum(np.abs(block)) > 1e-10:
                 nb_nonzero = nb_nonzero + 1
-                idx = np.append(idx, j)
+                idx = np.append(idx, j*4)
                 vblock = qblock.transpose((1,0)).reshape((-1,))
                 W0 = np.concatenate([W0, block.reshape((-1,))])
                 W = np.concatenate([W, vblock])
diff --git a/dnn/vec.h b/dnn/vec.h
index f93200723b6903531708834233823f2ba88f1446..ae6049fab19e7b54abb784564b662c3229d9d020 100644
--- a/dnn/vec.h
+++ b/dnn/vec.h
@@ -250,7 +250,7 @@ static inline void sparse_sgemv_accum8x4(float *out, const qweight *w, int rows,
          int pos;
          float * restrict y;
          int xj0, xj1, xj2, xj3;
-         pos = 4 * (*idx++);
+         pos = (*idx++);
          xj0 = x[pos+0];
          xj1 = x[pos+1];
          xj2 = x[pos+2];
@@ -318,7 +318,7 @@ static inline void sparse_sgemv_accum8x4(float *out, const qweight *w, int rows,
          int pos;
          float * restrict y;
          int xj0, xj1, xj2, xj3;
-         pos = 4 * (*idx++);
+         pos = (*idx++);
          xj0 = x[pos+0];
          xj1 = x[pos+1];
          xj2 = x[pos+2];
@@ -357,7 +357,7 @@ static inline void sparse_sgemv_accum8x4(float *out, const qweight *w, int rows,
          int pos;
          float * restrict y;
          float xj0, xj1, xj2, xj3;
-         pos = 4 * (*idx++);
+         pos = (*idx++);
          xj0 = x[pos+0];
          xj1 = x[pos+1];
          xj2 = x[pos+2];
diff --git a/dnn/vec_avx.h b/dnn/vec_avx.h
index df02dca36624f97f4fa907ca1933cc6fba9c393e..f18c771ae7d232d204d9db4f574b5d686e9d7fe1 100644
--- a/dnn/vec_avx.h
+++ b/dnn/vec_avx.h
@@ -508,7 +508,7 @@ static inline void sparse_sgemv_accum8x4(float *_out, const qweight *w, int rows
          __m256i vxj;
          __m256i vw;
          int pos;
-         pos = 4 * (*idx++);
+         pos = (*idx++);
          vxj = _mm256_set1_epi32(*(int*)&x[pos]);
          vw = _mm256_loadu_si256((const __m256i *)w); //_mm256_lddqu_si256?
          tmp = _mm256_maddubs_epi16(vxj, vw); //swap?
@@ -544,19 +544,19 @@ static inline void sparse_sgemv_accum8x4(float *out, const qweight *weights, int
          __m256 vxj;
          __m256 vw;
          id = *idx++;
-         vxj = _mm256_broadcast_ss(&x[4*id]);
+         vxj = _mm256_broadcast_ss(&x[id]);
          vw = _mm256_loadu_ps(&weights[0]);
          vy0 = _mm256_fmadd_ps(vw, vxj, vy0);

-         vxj = _mm256_broadcast_ss(&x[4*id+1]);
+         vxj = _mm256_broadcast_ss(&x[id+1]);
          vw = _mm256_loadu_ps(&weights[8]);
          vy0 = _mm256_fmadd_ps(vw, vxj, vy0);

-         vxj = _mm256_broadcast_ss(&x[4*id+2]);
+         vxj = _mm256_broadcast_ss(&x[id+2]);
          vw = _mm256_loadu_ps(&weights[16]);
          vy0 = _mm256_fmadd_ps(vw, vxj, vy0);

-         vxj = _mm256_broadcast_ss(&x[4*id+3]);
+         vxj = _mm256_broadcast_ss(&x[id+3]);
          vw = _mm256_loadu_ps(&weights[24]);
          vy0 = _mm256_fmadd_ps(vw, vxj, vy0);

diff --git a/dnn/vec_neon.h b/dnn/vec_neon.h
index a964f75166fc3a88762f0a0b466281521853e9a1..1a4a4ce5f453b09bad8149135c3f772c3eae51e9 100644
--- a/dnn/vec_neon.h
+++ b/dnn/vec_neon.h
@@ -333,7 +333,7 @@ static inline void sparse_sgemv_accum8x4(float *_out, const qweight *w, int rows
       for (j=0;j<colblocks;j++)
       {
          int pos;
-         pos = 4 * (*idx++);
+         pos = (*idx++);
          int8x16_t vw0, vw1, vx;
          vx = (int8x16_t)vld1q_dup_s32((int*)&x[pos]);
          vw0 = vld1q_s8(w);