diff --git a/dnn/nnet.c b/dnn/nnet.c
index 60bde585d70b9bbc01abed8d7a5bf3b549cdeea2..3661ba77fdf114737dfc7336cfd7b26d50fd50cb 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -415,7 +415,7 @@ void conv2d_float(float *out, const float *weights, int in_channels, int out_cha
 
 #define MAX_CONV2D_INPUTS 2048
 
-void compute_conv2d(const Conv2DLayer *conv, float *out, float *mem, const float *in, int len2, int activation)
+void compute_conv2d(const Conv2dLayer *conv, float *out, float *mem, const float *in, int len2, int activation)
 {
    int i;
    const float *bias;
diff --git a/dnn/nnet.h b/dnn/nnet.h
index 386d204de5f878b0143a69f28eb6a1db3f8c455f..16ce82babf528bdb739f61cdccf503a836c4d214 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -83,7 +83,7 @@ typedef struct {
   int out_channels;
   int ktime;
   int kheight;
-} Conv2DLayer;
+} Conv2dLayer;
 
 typedef struct {
   const float *bias;
@@ -175,6 +175,7 @@ extern const WeightArray lpcnet_plc_arrays[];
 extern const WeightArray rdovaeenc_arrays[];
 extern const WeightArray rdovaedec_arrays[];
 extern const WeightArray fwgan_arrays[];
+extern const WeightArray pitchdnn_arrays[];
 
 int linear_init(LinearLayer *layer, const WeightArray *arrays,
   const char *bias,
@@ -232,6 +233,8 @@ int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
   int nb_neurons,
   int activation);
 
+void compute_conv2d(const Conv2dLayer *conv, float *out, float *mem, const float *in, int len2, int activation);
+
 int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
   const char *embedding_weights,
   int nb_inputs,
diff --git a/dnn/pitchdnn.c b/dnn/pitchdnn.c
new file mode 100644
index 0000000000000000000000000000000000000000..5a35936ce056ea62c55189f4abb13f9408983018
--- /dev/null
+++ b/dnn/pitchdnn.c
@@ -0,0 +1,62 @@
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <math.h>
+#include "pitchdnn.h"
+#include "os_support.h"
+#include "nnet.h"
+#include "lpcnet_private.h"
+
+
+float compute_pitchdnn(
+    PitchDNNState *st,
+    const float *if_features,
+    const float *xcorr_features
+    )
+{
+  float if1_out[DENSE_IF_UPSAMPLER_1_OUT_SIZE];
+  float downsampler_in[NB_XCORR_FEATURES + DENSE_IF_UPSAMPLER_2_OUT_SIZE];
+  float downsampler_out[DENSE_DOWNSAMPLER_OUT_SIZE];
+  float conv1_tmp1[NB_XCORR_FEATURES + 2] = {0};
+  float conv1_tmp2[NB_XCORR_FEATURES + 2] = {0};
+  float output[DENSE_FINAL_UPSAMPLER_OUT_SIZE];
+  int i;
+  int pos=0;
+  float maxval=-1;
+  PitchDNN *model = &st->model;
+
+  /* IF */
+  compute_generic_dense(&model->dense_if_upsampler_1, if1_out, if_features, ACTIVATION_TANH);
+  compute_generic_dense(&model->dense_if_upsampler_2, &downsampler_in[NB_XCORR_FEATURES], if1_out, ACTIVATION_TANH);
+
+  /* xcorr */
+  OPUS_COPY(&conv1_tmp1[1], xcorr_features, NB_XCORR_FEATURES);
+  compute_conv2d(&model->conv2d_1, &conv1_tmp2[1], st->xcorr_mem1, conv1_tmp1, NB_XCORR_FEATURES, ACTIVATION_TANH);
+  compute_conv2d(&model->conv2d_1, &conv1_tmp1[1], st->xcorr_mem2, conv1_tmp2, NB_XCORR_FEATURES, ACTIVATION_TANH);
+  compute_conv2d(&model->conv2d_1, downsampler_in, st->xcorr_mem3, conv1_tmp1, NB_XCORR_FEATURES, ACTIVATION_TANH);
+
+  compute_generic_dense(&model->dense_downsampler, downsampler_out, downsampler_in, ACTIVATION_TANH);
+  compute_generic_gru(&model->gru_1_input, &model->gru_1_recurrent, st->gru_state, downsampler_out);
+  compute_generic_dense(&model->dense_final_upsampler, output, st->gru_state, ACTIVATION_LINEAR);
+
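+  /* Pick the output bin with the largest value (argmax over the final upsampler output). */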
+  for (i=0;i<DENSE_FINAL_UPSAMPLER_OUT_SIZE;i++) {
+    if (output[i] > maxval) {
+      pos = i;
+      maxval = output[i];
+    }
+  }
+  return (1.f/60.f)*pos - 1.5f;
+  /*return 256.f/pow(2.f, (1.f/60.f)*pos);*/
+}
+
+
+void pitchdnn_init(PitchDNNState *st)
+{
+  int ret;
+  OPUS_CLEAR(st, 1);
+  ret = init_pitchdnn(&st->model, pitchdnn_arrays);
+  celt_assert(ret == 0);
+  /* FIXME: perform arch detection. */
+}
diff --git a/dnn/pitchdnn.h b/dnn/pitchdnn.h
new file mode 100644
index 0000000000000000000000000000000000000000..74eacd77d97bc2d2592c6e836647a02097219e80
--- /dev/null
+++ b/dnn/pitchdnn.h
@@ -0,0 +1,30 @@
+#ifndef PITCHDNN_H
+#define PITCHDNN_H
+
+
+typedef struct PitchDNN PitchDNN;
+
+#include "pitchdnn_data.h"
+#include "lpcnet_private.h"
+
+#define NB_XCORR_FEATURES (PITCH_MAX_PERIOD-PITCH_MIN_PERIOD)
+
+
+typedef struct {
+  PitchDNN model;
+  float gru_state[GRU_1_STATE_SIZE];
+  float xcorr_mem1[(NB_XCORR_FEATURES + 2)*2];
+  float xcorr_mem2[(NB_XCORR_FEATURES + 2)*2*8];
+  float xcorr_mem3[(NB_XCORR_FEATURES + 2)*2*8];
+} PitchDNNState;
+
+
+void pitchdnn_init(PitchDNNState *st);
+
+float compute_pitchdnn(
+    PitchDNNState *st,
+    const float *if_features,
+    const float *xcorr_features
+    );
+
+#endif
diff --git a/dnn/torch/neural-pitch/export_neuralpitch_weights.py b/dnn/torch/neural-pitch/export_neuralpitch_weights.py
index 9f20ec9e7ba96382b3c173b4f2c8f7620ec3a2a0..cab8eaeb0176b9fd6d3cd7698c024f741e6f6bb8 100644
--- a/dnn/torch/neural-pitch/export_neuralpitch_weights.py
+++ b/dnn/torch/neural-pitch/export_neuralpitch_weights.py
@@ -52,7 +52,7 @@ def c_export(args, model):
 
     message = f"Auto generated from checkpoint {os.path.basename(args.checkpoint)}"
 
-    writer = CWriter(os.path.join(args.output_dir, "neural_pitch_data"), message=message, model_struct_name='PitchDNN')
+    writer = CWriter(os.path.join(args.output_dir, "pitchdnn_data"), message=message, model_struct_name='PitchDNN')
     writer.header.write(
 f"""
 #include "opus_types.h"