Timothy Lovett
Published © GPL3+

Smart Escalating Assistant Communicator

Voice-activated bedside assistant ensuring reliable, multi-level caregiver alerts regardless of location.

Intermediate · Full instructions provided · Over 2 days · 169 views

Things used in this project

Hardware components

Blues Notecard (Cellular)
×1
Blues Notecarrier A
Used to send the Twilio messages and to report the device's location when location mode is enabled (see the location-mode sketch below this parts list)
×1
WeAct 2.9 Inch Epaper Display
For an updating screen that uses minimal power
×1
Seeed Studio XIAO BLE nRF52840 Sense
×1

Software apps and online services

Edge Impulse Studio
Autodesk Fusion
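The location reporting mentioned for the Notecarrier A is a Notecard feature rather than anything in the main sketch below. A minimal sketch of enabling it might look like the following, reusing the same notecard object initialized with notecard.begin(Serial1) in the main listing; the helper name and the 300-second interval are illustrative values, not taken from the project.

// Hedged example: ask the Notecard to sample its location periodically.
// Assumes the Notecard/ArduinoJson setup from the main code listing.
void enableLocationMode() {
    J *req = notecard.newRequest("card.location.mode");
    if (req) {
        JAddStringToObject(req, "mode", "periodic"); // sample location on a schedule
        JAddNumberToObject(req, "seconds", 300);     // illustrative interval, not from the project
        notecard.sendRequest(req);
    }
}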

Story


Custom parts and enclosures

SEAC model

Split the objects apart before printing and use supports where needed.

Code

seac-code

C/C++
SEAC code. To build it you will need the Blues Notecard and ArduinoJson libraries, plus the CosmicBee inferencing library exported from my Edge Impulse project.
/* Edge Impulse ingestion SDK
 * Copyright (c) 2022 EdgeImpulse Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#define EIDSP_QUANTIZE_FILTERBANK   0

/* Includes ---------------------------------------------------------------- */
#include <PDM.h>
#include <CosmicBee-project-1_inferencing.h>
#include <Notecard.h>
#include <ArduinoJson.h> 

Notecard notecard;

/** Audio buffers, pointers and selectors */
typedef struct {
    int16_t *buffer;
    uint8_t buf_ready;
    uint32_t buf_count;
    uint32_t n_samples;
} inference_t;

enum DetectedWord {
    WORD_HELP,
    WORD_LIGHTS,
    WORD_OTHER,
    WORD_SEAC
};

DetectedWord get_highest_confidence_word(const ei_impulse_result_t &result) {
    float max_confidence = 0.0;
    size_t max_index = 0;

    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        if (result.classification[ix].value > max_confidence) {
            max_confidence = result.classification[ix].value;
            max_index = ix;
        }
    }

    // NOTE: this mapping assumes class indices 0-3 correspond to "help", "lights",
    // "other", and "seac" in the exported model; check ei_classifier_inferencing_categories
    // if your label order differs.
    switch (max_index) {
        case 0: return WORD_HELP;
        case 1: return WORD_LIGHTS;
        case 2: return WORD_OTHER;
        case 3: return WORD_SEAC;
        default: return WORD_OTHER; // Fallback if no label matched
    }
}


static inference_t inference;
static signed short sampleBuffer[2048];
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal
unsigned long lastWordTime = 0; // Time when the last word was detected
const unsigned long wordTimeout = 10000; // 10 seconds timeout
bool isLedOn = false; 
unsigned long lastUpdateTime = 0;
const unsigned long updateInterval = 60000; // 1 minute; lastUpdateTime/updateInterval are not used in the code shown here

void setup()
{
    notecard.begin(Serial1);

    pinMode(D4, OUTPUT);
    digitalWrite(D4, LOW); 

    pinMode(5, OUTPUT);
    digitalWrite(5, LOW);

    // NOTE: the e-paper `display` object, TimezoneInfo, getTimeAndZone() and displayClock()
    // come from the rest of the project source and are not shown in this listing.
    display.init(115200);
    TimezoneInfo currentTimezoneInfo = getTimeAndZone();
    displayClock(currentTimezoneInfo);
    display.hibernate();

    if (microphone_inference_start(EI_CLASSIFIER_RAW_SAMPLE_COUNT) == false) {
        return;
    }
}

void loop() {
    bool m = microphone_inference_record();
    if (!m) {
        return;
    }

    signal_t signal;
    signal.total_length = EI_CLASSIFIER_RAW_SAMPLE_COUNT;
    signal.get_data = &microphone_audio_signal_get_data;
    ei_impulse_result_t result = { 0 };

    EI_IMPULSE_ERROR r = run_classifier(&signal, &result, debug_nn);
    if (r != EI_IMPULSE_OK) {
        return;
    }

    DetectedWord detected_word = get_highest_confidence_word(result);

    // Check if "SEAC" was said, or if another word (except OTHER) is said within 10 seconds after SEAC
    if (detected_word == WORD_SEAC || (millis() - lastWordTime <= wordTimeout && detected_word != WORD_OTHER)) {
        if (detected_word == WORD_SEAC) {
            lastWordTime = millis(); // Reset timer
            digitalWrite(5, HIGH); // Turn on the buzzer
            delay(100);            // Buzzer on for 100ms
            digitalWrite(5, LOW);  // Turn off the buzzer
        } else if (millis() - lastWordTime <= wordTimeout) {
            switch(detected_word) {
                case WORD_LIGHTS:
                    digitalWrite(5, HIGH); // Turn on the buzzer
                    delay(200);            // Buzzer on for 200ms
                    digitalWrite(5, LOW);  // Turn off the buzzer
                    toggleLights(); // Toggle the RGB LED on or off
                    break;
                case WORD_HELP: {
                    digitalWrite(5, HIGH); // Turn on the buzzer
                    delay(200);            // Buzzer on for 200ms
                    digitalWrite(5, LOW);  // Turn off the buzzer
                    // Queue an alert note and sync immediately so it can be routed on to Twilio
                    J *req = notecard.newRequest("note.add");
                    if (req) {
                        JAddBoolToObject(req, "sync", true);
                        JAddStringToObject(req, "file", "alert.qo");
                        J* body = JAddObjectToObject(req, "body");
                        JAddStringToObject(body, "customMessage", "Alert, in need of immediate assistance!");
                        if (!notecard.sendRequest(req)) {
                            while(1); // Halt here if the request could not be sent
                        }
                    }
                    break;
                }
            }
        }
    }
}

void toggleLights() {
    if (isLedOn) {
        digitalWrite(D4, LOW); 
        isLedOn = false;
    } else {
        digitalWrite(D4, HIGH); 
        isLedOn = true;
    }
}


/**
 * @brief      PDM buffer full callback
 *             Get data and call audio thread callback
 */
static void pdm_data_ready_inference_callback(void)
{
    int bytesAvailable = PDM.available();

    // read into the sample buffer
    int bytesRead = PDM.read((char *)&sampleBuffer[0], bytesAvailable);

    if (inference.buf_ready == 0) {
        for(int i = 0; i < bytesRead>>1; i++) {
            inference.buffer[inference.buf_count++] = sampleBuffer[i];

            if(inference.buf_count >= inference.n_samples) {
                inference.buf_count = 0;
                inference.buf_ready = 1;
                break;
            }
        }
    }
}

/**
 * @brief      Init inferencing struct and setup/start PDM
 *
 * @param[in]  n_samples  The n samples
 *
 * @return     True if the buffer was allocated and PDM started successfully
 */
static bool microphone_inference_start(uint32_t n_samples)
{
    inference.buffer = (int16_t *)malloc(n_samples * sizeof(int16_t));

    if(inference.buffer == NULL) {
        return false;
    }

    inference.buf_count  = 0;
    inference.n_samples  = n_samples;
    inference.buf_ready  = 0;

    // configure the data receive callback
    PDM.onReceive(&pdm_data_ready_inference_callback);

    PDM.setBufferSize(4096);

    // initialize PDM with:
    // - one channel (mono mode)
    // - a 16 kHz sample rate
    if (!PDM.begin(1, EI_CLASSIFIER_FREQUENCY)) {
        ei_printf("Failed to start PDM!");
        microphone_inference_end();

        return false;
    }

    // set the gain, defaults to 20
    PDM.setGain(127);

    return true;
}

/**
 * @brief      Wait on new data
 *
 * @return     True when finished
 */
static bool microphone_inference_record(void)
{
    inference.buf_ready = 0;
    inference.buf_count = 0;

    while(inference.buf_ready == 0) {
        delay(10);
    }

    return true;
}

/**
 * Get raw audio signal data
 */
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
    numpy::int16_to_float(&inference.buffer[offset], out_ptr, length);

    return 0;
}

/**
 * @brief      Stop PDM and release buffers
 */
static void microphone_inference_end(void)
{
    PDM.end();
    free(inference.buffer);
}

#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif
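The listing uses a display object (plus TimezoneInfo, getTimeAndZone() and displayClock()) that comes from the rest of the project and is not shown here. For reference, a WeAct 2.9-inch panel is commonly driven with the GxEPD2 library, and a declaration along these lines would match the display.init()/display.hibernate() calls in setup(); the driver class and pin assignments below are assumptions, not the project's actual wiring.

// Hedged sketch only: one possible GxEPD2 declaration for a 2.9-inch black/white panel.
// Pin assignments are placeholders; use the wiring from your own build.
#include <GxEPD2_BW.h>
GxEPD2_BW<GxEPD2_290_T94_V2, GxEPD2_290_T94_V2::HEIGHT> display(
    GxEPD2_290_T94_V2(/*CS=*/ D1, /*DC=*/ D3, /*RST=*/ D0, /*BUSY=*/ D2));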
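Also worth noting: the sketch never issues a hub.set request, so it assumes the Notecard has already been associated with a Notehub project, which is what lets the alert.qo notes reach Twilio through a route. If you are starting from a fresh Notecard, a one-time request along these lines is the usual way to do that; the ProductUID below is a placeholder, not this project's real UID.

// Hedged sketch: one-time Notehub provisioning, e.g. at the end of setup().
// "com.example.yourname:seac" is a placeholder ProductUID.
J *req = notecard.newRequest("hub.set");
if (req) {
    JAddStringToObject(req, "product", "com.example.yourname:seac");
    JAddStringToObject(req, "mode", "continuous"); // keep the connection up so alerts go out promptly
    notecard.sendRequest(req);
}

With "sync": true set on the note.add request in loop(), the alert is pushed to Notehub as soon as it is queued rather than waiting for the next periodic sync.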

Credits

Timothy Lovett

16 projects • 16 followers
Maker. I spent over a decade working on backend systems in various languages.
