Ex Machina
Published © GPL3+

Bionic Hand Voice Commands Module w/ Edge Impulse & Arduino

A TinyML keyword spotting application that uses an Arduino Nano 33 BLE Sense to drive the gestures of a 3D-printed bionic hand from voice commands

Advanced · Full instructions provided · 2 days · 4,588 views

Things used in this project

Hardware components

Arduino Nano 33 BLE Sense
×1
SG90 Micro-servo motor
×5
Breadboard (generic)
×1
Jumper wires (generic)
×1

Software apps and online services

Edge Impulse Studio
Arduino IDE

Hand tools and fabrication machines

3D Printer (generic)

Story


Code

Keyword Spotting for Bionic Hand

C/C++
The full code deployed to the Arduino Nano 33 BLE Sense
/* Edge Impulse Arduino examples
 * Copyright (c) 2021 EdgeImpulse Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// If your target is limited in memory, remove this macro to save 10K of RAM
#define EIDSP_QUANTIZE_FILTERBANK   0

/*
 ** NOTE: If you run into a TFLite arena allocation issue:
 **
 ** This may be due to dynamic memory fragmentation.
 ** Try defining "-DEI_CLASSIFIER_ALLOCATION_STATIC" in boards.local.txt (create
 ** if it doesn't exist) and copy this file to
 ** `<ARDUINO_CORE_INSTALL_PATH>/arduino/hardware/<mbed_core>/<core_version>/`.
 **
 ** See
 ** (https://support.arduino.cc/hc/en-us/articles/360012076960-Where-are-the-installed-cores-located-)
 ** to find where Arduino installs cores on your machine.
 **
 ** If the problem persists then there's not enough memory for this model and application.
 */

/* Includes ---------------------------------------------------------------- */
#include <PDM.h>
#include <Keyword_Spotting_for_Bionic_Hand_inferencing.h>

#include <Servo.h>
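// Digital pins driving the five finger servos of the 3D-printed hand
// (one SG90 micro servo per finger)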
#define s_1 D7
#define s_2 D6
#define s_3 D5
#define s_4 D4
#define s_5 D3


Servo s1;
Servo s2;
Servo s3;
Servo s4;
Servo s5;

int ang0;   // servo angle for an extended (open) finger
int ang180; // servo angle for a curled (closed) finger

/** Audio buffers, pointers and selectors */
typedef struct {
    int16_t *buffer;
    uint8_t buf_ready;
    uint32_t buf_count;
    
    uint32_t n_samples;
} inference_t;

static inference_t inference;
static signed short sampleBuffer[2048]; // raw samples copied from the PDM peripheral
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal

/**
 * @brief      Arduino setup function
 */
void setup()
{
    // put your setup code here, to run once:
    Serial.begin(115200);

    Serial.println("Edge Impulse Inferencing Demo");

    ang0 = 10;    // "open" position, just inside the servo's 0 degree end of travel
    ang180 = 170; // "closed" position, just inside the 180 degree end of travel
    
    // attach each finger servo and move it to the rest (open) position
    s1.attach(s_1);
    s1.write(ang0);
    s2.attach(s_2);
    s2.write(ang0);
    s3.attach(s_3);
    s3.write(ang0);
    s4.attach(s_4);
    s4.write(ang0);
    s5.attach(s_5);
    s5.write(ang0);
  
    // the on-board RGB LED is active-low: writing HIGH turns each color off
    pinMode(LEDR, OUTPUT);
    pinMode(LEDG, OUTPUT);
    pinMode(LEDB, OUTPUT);

    digitalWrite(LEDR, HIGH);
    digitalWrite(LEDG, HIGH);
    digitalWrite(LEDB, HIGH);

    // summary of inferencing settings (from model_metadata.h)
    ei_printf("Inferencing settings:\n");
    ei_printf("\tInterval: %.2f ms.\n", (float)EI_CLASSIFIER_INTERVAL_MS);
    ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
    ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
    ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) / sizeof(ei_classifier_inferencing_categories[0]));

    if (microphone_inference_start(EI_CLASSIFIER_RAW_SAMPLE_COUNT) == false) {
        ei_printf("ERR: Failed to setup audio sampling\r\n");
        return;
    }
}

void turn_off_leds(){
    digitalWrite(LEDR, HIGH);
    digitalWrite(LEDG, HIGH);
    digitalWrite(LEDB, HIGH);

}

/**
 * @brief      Map the predicted keyword to a gesture: an LED color
 *             plus a target angle for each finger servo.
 */
void turn_on_leds(int pred_index){
    switch (pred_index)
    {
        case 0: // "dois" (two)
            turn_off_leds();
            digitalWrite(LEDR, LOW);
            s1.write(ang180);
            s2.write(ang180);
            s3.write(ang0);
            s4.write(ang0);
            s5.write(ang180);
            break;
        case 1: // "joia" (thumbs up)
            turn_off_leds();
            digitalWrite(LEDG, LOW);
            s1.write(ang180);
            s2.write(ang180);
            s3.write(ang180);
            s4.write(ang180);
            s5.write(ang0);
            break;
        case 2: // "nada" (nothing - rest position)
            turn_off_leds();
            s1.write(ang0);
            s2.write(ang0);
            s3.write(ang0);
            s4.write(ang0);
            s5.write(ang0);
            break;
        case 3: // "ok"
            turn_off_leds();
            digitalWrite(LEDB, LOW);
            s1.write(ang0);
            s2.write(ang0);
            s3.write(ang0);
            s4.write(ang180);
            s5.write(ang180);
            break;
        case 4: // "rock"
            turn_off_leds();
            digitalWrite(LEDB, LOW);
            digitalWrite(LEDG, LOW);
            s1.write(ang0);
            s2.write(ang180);
            s3.write(ang180);
            s4.write(ang0);
            s5.write(ang180);
            break;
        case 5: // "um" (one)
            turn_off_leds();
            digitalWrite(LEDG, LOW);
            digitalWrite(LEDR, LOW);
            s1.write(ang180);
            s2.write(ang180);
            s3.write(ang180);
            s4.write(ang0);
            s5.write(ang180);
            break;
    }
}

/**
 * @brief      Arduino main function. Runs the inferencing loop.
 */
void loop()
{
    ei_printf("Starting inferencing in 2 seconds...\n");
    digitalWrite(LEDB, LOW);
    digitalWrite(LEDR, LOW);
    delay(2000);
    
    turn_off_leds();
    ei_printf("Recording...\n");
    
    bool m = microphone_inference_record();
    if (!m) {
        ei_printf("ERR: Failed to record audio...\n");
        return;
    }

    ei_printf("Recording done\n");

    signal_t signal;
    signal.total_length = EI_CLASSIFIER_RAW_SAMPLE_COUNT;
    signal.get_data = &microphone_audio_signal_get_data;
    ei_impulse_result_t result = { 0 };

    EI_IMPULSE_ERROR r = run_classifier(&signal, &result, debug_nn);
    if (r != EI_IMPULSE_OK) {
        ei_printf("ERR: Failed to run classifier (%d)\n", r);
        return;
    }

    // print the predictions
    ei_printf("Predictions ");
    ei_printf("(DSP: %d ms., Classification: %d ms., Anomaly: %d ms.)",
        result.timing.dsp, result.timing.classification, result.timing.anomaly);
    ei_printf(": \n");

    int pred_index = 0;
    float pred_value = 0;
    
    // pick the label with the highest confidence (argmax over the classifier output)
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        //ei_printf("    %s: %.5f\n", result.classification[ix].label, result.classification[ix].value);
        if (result.classification[ix].value > pred_value){
          pred_index = ix;
          pred_value = result.classification[ix].value;
        }
    }
    ei_printf("\nPredictions ");
    ei_printf(" PREDICTION: ==> \\\\ %s //// with probability %.2f\n",
        result.classification[pred_index].label,pred_value);
    ei_printf(": \n");
    turn_on_leds(pred_index);
    delay(1500);
    // return the hand to the rest position
    turn_off_leds();
    s1.write(ang0);
    s2.write(ang0);
    s3.write(ang0);
    s4.write(ang0);
    s5.write(ang0);
    delay(1000);
    
#if EI_CLASSIFIER_HAS_ANOMALY == 1
    ei_printf("    anomaly score: %.3f\n", result.anomaly);
#endif
}



/**
 * @brief      PDM buffer full callback
 *             Get data and call audio thread callback
 */
static void pdm_data_ready_inference_callback(void)
{
    int bytesAvailable = PDM.available();

    // read into the sample buffer
    int bytesRead = PDM.read((char *)&sampleBuffer[0], bytesAvailable);

    if (inference.buf_ready == 0) {
        for(int i = 0; i < bytesRead>>1; i++) {
            inference.buffer[inference.buf_count++] = sampleBuffer[i];

            if(inference.buf_count >= inference.n_samples) {
                inference.buf_count = 0;
                inference.buf_ready = 1;
                break;
            }
        }
    }
}

/**
 * @brief      Init inferencing struct and setup/start PDM
 *
 * @param[in]  n_samples  The n samples
 *
 * @return     True if audio sampling was started, false otherwise
 */
static bool microphone_inference_start(uint32_t n_samples)
{
    inference.buffer = (int16_t *)malloc(n_samples * sizeof(int16_t));

    if(inference.buffer == NULL) {
        return false;
    }

    inference.buf_count  = 0;
    inference.n_samples  = n_samples;
    inference.buf_ready  = 0;

    // configure the data receive callback
    PDM.onReceive(&pdm_data_ready_inference_callback);

    PDM.setBufferSize(4096);

    // initialize PDM with:
    // - one channel (mono mode)
    // - a 16 kHz sample rate
    if (!PDM.begin(1, EI_CLASSIFIER_FREQUENCY)) {
        ei_printf("Failed to start PDM!");
        microphone_inference_end();

        return false;
    }

    // raise the microphone gain (the library default is 20)
    PDM.setGain(127);

    return true;
}

/**
 * @brief      Wait on new data
 *
 * @return     True when finished
 */
static bool microphone_inference_record(void)
{
    inference.buf_ready = 0;
    inference.buf_count = 0;

    while(inference.buf_ready == 0) {
        delay(10);
    }

    return true;
}

/**
 * Get raw audio signal data
 */
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
    numpy::int16_to_float(&inference.buffer[offset], out_ptr, length);

    return 0;
}

/**
 * @brief      Stop PDM and release buffers
 */
static void microphone_inference_end(void)
{
    PDM.end();
    free(inference.buffer);
}

#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif

Credits

Ex Machina

1 project • 7 followers
https://linktr.ee/exmachinaproject
