Zora CheHana Chitsaz
Published

Musical Cocoon

An interactive and transformative audiovisual art experience.

Intermediate · Full instructions provided · 120 minutes · 99
Musical Cocoon

Things used in this project

Hardware components

Adafruit Flexible Addressable LED Strip
×2
ESP32
Espressif ESP32
×1
Stepper Motor
×1
Uninterruptible Power Supply (UPS), 24 V
Uninterruptible Power Supply (UPS), 24 V
×1
Microstep Driver
×1

Hand tools and fabrication machines

Drill / Driver, Cordless
Drill / Driver, Cordless
Laser cutter (generic)
Laser cutter (generic)
3D Printer (generic)
3D Printer (generic)

Story

Read more

Custom parts and enclosures

spur_gear_(24_teeth).stl

The inner cocoon is threaded through this gear. This gear is moved by the gear attached to the motor.

spur_gear_(12_teeth).stl

The motor is attached to this gear.

inner_cocoon_boning.stl

This boning is sewn inside the inner cocoon to create more space for a person to stand and to generate a more oval shape.

Pulley Design

Although we ultimately did not use it, it helped our inspiration and our prototyping process.

inner_cocoon_ring.stl

The curtains of the inner cocoon are attached and suspended from this ring.

inner_cocoon_holder.stl

The top of the inner cocoon is attached to this ring

Schematics

Schematics

Code

gui.py

Python
This is the code that launches the GUI.
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import beat
import get_mp3

# Path of the MP3 chosen via the file dialog; set by process_mp3().
file_name = ""


def process_mp3(_file_name, option):
    """Remember the selected MP3 path and report the chosen mood option.

    Stores *_file_name* in the module-level ``file_name`` so that the
    Submit handler can pick it up later.
    """
    global file_name
    file_name = _file_name
    message = f"Processing {file_name} with option '{option}'"
    print(message)


def open_file_dialog():
    """Prompt the user for an .mp3 file and hand it to process_mp3.

    Does nothing if the dialog is cancelled (empty selection).
    """
    chosen = filedialog.askopenfilename(filetypes=[("MP3 files", "*.mp3")])
    if not chosen:
        return
    process_mp3(chosen, dropdown_var.get())


# Create the main window
root = tk.Tk()
root.title("MP3 File Processor")

# Holds the currently selected mood name from the combobox.
dropdown_var = tk.StringVar()

# Per-mood colour palettes: each pair is ([RGB means], [RGB spreads]) used as
# Gaussian parameters for colour sampling in beat.py (one inner list per
# palette colour; the last entry is a near-black).
color_means_melodic, color_vars_melodic = [[150, 10, 200], [20, 150, 200], [
    255, 0, 0], [10, 10, 10]], [[20, 2, 50], [10, 40, 50], [50, 20, 20], [2, 2, 2]]
color_means_triumphant, color_vars_triumphant = [[250, 10, 10], [200, 10, 200], [
    255, 128, 0], [10, 10, 10]], [[10, 30, 30], [30, 40, 30], [2, 20, 1], [2, 2, 2]]

color_means_transcendant, color_vars_transcendant = [[25, 25, 112], [230, 190, 255], [
    255, 215, 0], [72, 209, 204], [10, 10, 10]], [[10, 10, 10], [10, 10, 10], [10, 10, 10], [10, 10, 10], [10, 10, 10]]

color_means_joyous, color_vars_joyous = [[255, 255, 0], [255, 127, 80], [
    30, 144, 255], [50, 205, 50], [10, 10, 10]], [[10, 10, 10], [10, 10, 10], [10, 10, 10], [10, 10, 10], [10, 10, 10]]

# Mood name -> [means, spreads]; the keys double as the dropdown options.
colors = {
    "melodic": [color_means_melodic, color_vars_melodic],
    "triumphant": [color_means_triumphant, color_vars_triumphant],
    "transcendant": [color_means_transcendant, color_vars_transcendant],
    "joyous": [color_means_joyous, color_vars_joyous]
}

dropdown_options = list(colors.keys())
dropdown = ttk.Combobox(root, textvariable=dropdown_var,
                        values=dropdown_options)
dropdown.current(0)  # Set the default value
dropdown.pack(pady=10)

input_frame = tk.Frame(root)
input_frame.pack(pady=10)


# Create a button that opens the file dialog
open_file_button = tk.Button(
    input_frame, text="Open MP3 File", command=open_file_dialog)
open_file_button.pack(pady=20)

# Label for the alternative YouTube-URL input path
or_label = tk.Label(input_frame, text="or give a Youtube URL:")
or_label.pack(side=tk.LEFT, padx=5)

# Text entry box for the YouTube URL
input_entry = tk.Entry(input_frame)
input_entry.pack(side=tk.LEFT)

# Start/end timestamps ("MM:SS"); used only when a YouTube URL is given.
time_stamp_frame = tk.Frame(root)
time_stamp_frame.pack(pady=10)
start_time_label = tk.Label(time_stamp_frame, text="Start Time (e.g., 00:00):")
start_time_label.pack(side=tk.LEFT)
start_time_entry = tk.Entry(time_stamp_frame, width=10)
start_time_entry.pack(side=tk.LEFT, padx=5)

# End time entry
end_time_label = tk.Label(time_stamp_frame, text="End Time (e.g., 05:00):")
end_time_label.pack(side=tk.LEFT)
end_time_entry = tk.Entry(time_stamp_frame, width=10)
end_time_entry.pack(side=tk.LEFT, padx=5)


def _mmss_to_seconds(timestamp: str) -> int:
    """Convert an "MM:SS" string into a total number of seconds."""
    parts = timestamp.split(":")
    return int(parts[0]) * 60 + int(parts[1])


def on_submit():
    """Handle the Submit button.

    If the URL field holds a YouTube link, download and trim the audio
    with get_mp3 using the start/end timestamp entries; otherwise fall
    back to the file previously chosen via the file dialog.  Then run
    the show via beat.do_it with the selected colour palette.
    """
    selected_option = dropdown_var.get()
    if input_entry.get().startswith("https://www.youtube.com/"):
        start_time = start_time_entry.get()
        end_time = end_time_entry.get()
        print(f"Start time: {start_time}, End time: {end_time}")
        # Timestamp parsing was previously duplicated inline; the helper
        # keeps both conversions identical.
        get_mp3.get_mp3(input_entry.get(),
                        _mmss_to_seconds(start_time),
                        _mmss_to_seconds(end_time))
        _file_name = get_mp3.yt_download_name + ".trim.mp3"
    else:
        # Module-level global set by process_mp3 (read-only here).
        _file_name = file_name
    print(f"Processing {_file_name} with option '{selected_option}'")
    beat.do_it(_file_name, colors[selected_option][0],
               colors[selected_option][1])


# Submit resolves the audio source (URL or dialog choice) and runs the show.
submit_button = tk.Button(root, text="Submit", command=on_submit)
submit_button.pack(pady=20)

# Run the application
root.mainloop()

get_mp3.py

Python
This is the code that gets an MP3 file from a YouTube link.
#!/usr/bin/env python3

import subprocess
from time import sleep
from pytube import YouTube
import pytube
import os

# Base filename (without extension) for the downloaded/trimmed audio.
yt_download_name = "YTDownload"


def get_mp3(video_url: str, trim_start: int, trim_end: int):
    """Download a YouTube video's audio track and trim it to a window.

    Downloads the first audio-only stream to "<yt_download_name>.mp4",
    renames it to .mp3, then uses ffmpeg to cut out the
    [trim_start, trim_end] window (seconds), producing
    "<yt_download_name>.trim.mp3" in the current working directory.

    Requires the `ffmpeg` binary on PATH.  Raises CalledProcessError if
    ffmpeg fails.
    """
    path = os.getcwd() + '/'

    YouTube(video_url).streams.filter(only_audio=True).first().download(filename=yt_download_name + ".mp4")
    location = path + yt_download_name + '.mp4'
    renametomp3 = path + yt_download_name + '.mp3'

    # os.replace is portable and atomic; the previous `os.system('mv ...')`
    # broke on paths containing spaces and on non-POSIX shells, and the
    # follow-up sleep is unnecessary since the call is synchronous.
    os.replace(location, renametomp3)

    # Argument list with shell=False avoids quoting/injection problems;
    # check=True surfaces ffmpeg failures instead of silently continuing.
    cmd = ["ffmpeg", "-ss", str(trim_start), "-t", str(trim_end - trim_start),
           "-i", renametomp3, yt_download_name + ".trim.mp3", "-y"]
    print("CMD: ", " ".join(cmd))
    subprocess.run(cmd, check=True)

beat.py

Python
This is the code that processes the music and controls the motor and lights.
import time
import librosa
import matplotlib.pyplot as plt
import math
import threading
import librosa.display
import numpy as np
import sounddevice as sd
import soundfile as sf
import serial

# Number of addressable LEDs on the strip (must match the firmware's NUM_PIXELS).
N_PIXELS = 100
DEBUG = True

# Serial link to the ESP32 driving the motor and LED strip.
# NOTE(review): port is hard-coded; adjust '/dev/ttyUSB1' per machine.
ser = serial.Serial('/dev/ttyUSB1', 115200)


# Global counter of serial messages sent (used only for DEBUG logging).
i = 0

def sendSerMotor(data: str):
    """Send one or more newline-separated motor commands over serial.

    Each line is terminated with CRLF before being written.
    """
    global i
    for line in data.split("\n"):
        line += "\r\n"
        if DEBUG:
            i += 1
            print("SENDING DATA MOTOR", i, line)
        ser.write(line.encode())


def sendSerLights(data):
    """Send a single light command over serial, terminated with CRLF."""
    global i
    payload = data + "\r\n"
    if DEBUG:
        i += 1
        print("SENDING DATA", i, payload)
    ser.write(payload.encode())


def formatAngleMove(angle, delayMicroSec=500):
    """Encode a motor move as "<+/->:<6-digit angle>:<5-digit delay>".

    A non-positive angle gets the "-" direction.  Fields are zero-padded
    and truncated to their fixed widths.
    """
    sign = "+" if angle > 0 else "-"
    magnitude = str(abs(angle)).zfill(6)[:6]
    delay_field = str(delayMicroSec).zfill(5)[:5]
    return f"{sign}:{magnitude}:{delay_field}"


def formatPixelSet(pixels: list[int], Rs: list[int], Gs: list[int], Bs: list[int]):
    """Encode pixel colour assignments as "L:ppp:rrr:ggg:bbb..." groups.

    Each pixel contributes one 16-character ":ppp:rrr:ggg:bbb" group with
    3-digit zero-padded fields, appended after the leading "L".
    """
    parts = []
    for idx, pixel in enumerate(pixels):
        parts.append(
            f":{str(pixel).zfill(3)}:{str(Rs[idx]).zfill(3)}"
            f":{str(Gs[idx]).zfill(3)}:{str(Bs[idx]).zfill(3)}"
        )
    return "L" + "".join(parts)


def save_wav_file(y, sr, file_path):
    """Write audio samples `y` at sample rate `sr` to `file_path` via soundfile."""
    sf.write(file_path, y, sr)


def generateGaussianRandomMotorMovement(time_steps: int, volumes_diffs: list[int]):
    """Build one motor command string per time step.

    Larger volume jumps fall into higher "zones", which spin further and
    with higher probability.  The cumulative angle is bounded to +/-400
    degrees; crossing a bound starts a pause (N_STEPS_PER_BREAK steps of
    empty commands) and flips the direction.  Steps with no movement
    yield "" so the command list stays aligned with the time steps.
    """
    N_ZONES = 6
    # Per-zone spin magnitude (degrees) and probability of spinning at all.
    spin_per_zone = [0, 20, 40, 60, 120, 200]
    prob_spin = [0, 0.08, 0.2, 0.3, 0.4, 0.4]
    diff_min, diff_max = min(volumes_diffs), max(volumes_diffs)
    # NOTE(review): zero when all diffs are equal, which would make the
    # partition below divide by zero — assumed not to happen for real audio.
    diff_separation = (diff_max - diff_min) / N_ZONES

    MAX_TOTAL_RET = 400
    MIN_TOTAL_RET = -400

    
    def sample(idx):
        # With probability prob_spin[idx] spin by the zone's angle using a
        # fixed 3000 us step delay; otherwise stay still.
        if np.random.random() < prob_spin[idx]:
            return spin_per_zone[idx], 3_000
        else:
            return 0, 0
    cmds: list[str] = []

    def get_volume_diff_partition(i):
        # Zone index for volumes_diffs[i], clamped to the last zone.
        return min(int((volumes_diffs[i] - diff_min) // diff_separation), N_ZONES - 1)

    N_STEPS_PER_MOVE = 8
    N_STEPS_PER_BREAK = 8
    last_change = 0
    curr_angle = 0
    curr_dir = 1
    change_period = False
    for i in range(time_steps):
        # Only every N_STEPS_PER_MOVE-th step may move, and only outside a
        # direction-change pause.
        if i % N_STEPS_PER_MOVE == 0 and change_period == False:
            angle_samp, delay_samp = sample(get_volume_diff_partition(i))
            if curr_angle > MAX_TOTAL_RET and curr_dir == 1:
                # Hit the positive bound: start a pause and reverse.
                change_period = True
                last_change = i
                curr_dir = -1
            elif curr_angle < MIN_TOTAL_RET and curr_dir == -1:
                # Hit the negative bound: start a pause and reverse.
                change_period = True
                last_change = i
                curr_dir = 1
            angle_samp = curr_dir * angle_samp
            cmds.append(formatAngleMove(angle_samp, delay_samp))
            curr_angle += angle_samp
        elif i % N_STEPS_PER_MOVE == 0 and change_period == True and i - last_change > N_STEPS_PER_BREAK:
            # Pause is over; movement resumes on the next move tick.
            change_period = False
            cmds.append("")
        else:
            cmds.append("")
    return cmds


def generateGaussianRandomDownwardLight(time_steps: int, volumes: list[int], gaussian_means: list[list[float]], gaussian_stds: list[list[float]],
                                        n_lights=N_PIXELS, top_pixel_is_max=True):
    """
        Sample from a Gaussian distribution which should have width to around 255ish.
        We will re-sample until we get a value that is within the range of 0-255.

        Returns one formatted "L..." pixel command per time step.  Louder
        steps light exponentially more pixels (1.7 ** volume partition),
        walking downward from the top of the strip and wrapping around.
    """
    assert len(gaussian_means) == len(
        gaussian_stds), "Must have same number of means and stds"

    N_VOLUME_SEPARATIONS = 6
    vol_min, vol_max = min(volumes), max(volumes)
    # NOTE(review): zero if all volumes are equal -> division by zero below.
    vol_separation = (vol_max - vol_min) / N_VOLUME_SEPARATIONS

    def sample():
        # Pick one palette Gaussian uniformly, draw an RGB triple, and
        # re-sample (recursively) until every channel lands in 0-255.
        idx = np.random.choice(len(gaussian_means))
        normal_samp = np.random.normal(gaussian_means[idx], gaussian_stds[idx])
        if normal_samp.max() > 255 or normal_samp.min() < 0:
            return sample()
        return normal_samp.round().astype(int)
    cmds: list[str] = []

    if not top_pixel_is_max:
        raise NotImplementedError("Only top pixel is max is implemented")
    curr_light_step = 0

    def get_volume_partition(i):
        # Zone index for volumes[i]; can reach N_VOLUME_SEPARATIONS when
        # volumes[i] == vol_max (it is only used as an exponent below, so
        # there is no out-of-range indexing).
        return int((volumes[i] - vol_min) // vol_separation)

    for i in range(time_steps):
        # Louder partitions advance exponentially more pixels this step.
        n_pixel_step = int(round(1.7 ** get_volume_partition(i)))
        pixels = []
        Rs = []
        Gs = []
        Bs = []
        for j in range(n_pixel_step):
            # Walk from the top pixel (n_lights - 1) downward, wrapping.
            pixel = (n_lights - curr_light_step % n_lights) - 1
            pixels.append(pixel)
            R, G, B = sample()
            Rs.append(R)
            Gs.append(G)
            Bs.append(B)
            curr_light_step += 1
        s = formatPixelSet(pixels, Rs, Gs, Bs)
        cmds.append(s)
    return cmds
    # Start


def amplify_on_downbeats(y, sr, downbeat_times, window=0.1, amplification_factor=1.5):
    """Scale the samples in a short window after each downbeat, in place.

    `downbeat_times` are sample indices; `window` is the window length in
    seconds.  Returns the (mutated) signal for convenience.
    """
    n_window = int(window * sr)
    for beat_sample in downbeat_times:
        begin = int(round(beat_sample))
        finish = begin + n_window
        print(sr, begin, finish, len(y))
        y[begin:finish] *= amplification_factor
    return y

def get_sample_moments(y, sr, downbeat_subdivisions=16):
    """Track beats in `y` and derive per-step sample positions and volumes.

    Returns (sample_timestamps, volumes, volume_diffs):
      - sample_timestamps: sample indices, 16 evenly spaced steps per
        beat-to-beat interval,
      - volumes: mean |amplitude| in a 2-second window around each beat,
      - volume_diffs: successive differences of `volumes`.

    Side effects: saves test_vol.png and test_vol_diff.png plots.
    NOTE(review): `downbeat_subdivisions` is unused; the subdivision count
    comes from add_downbeat_time_steps' mul_fact default (16).
    """
    # Calculate the onset envelope
    onset_env = librosa.onset.onset_strength(y=y, sr=sr)

    # Use librosa's beat tracker with modified tightness for more
    # flexibility in tempo variations; beat positions are in samples.
    tempo, beats = librosa.beat.beat_track(
        y=y, sr=sr, onset_envelope=onset_env, tightness=100, units='samples')
    print(sr)

    # With INTERV == 1 below, every tracked beat is kept as a "downbeat".
    downbeats = [beats[0]]

    print(len(beats))
    for i in range(1, len(beats)):
        INTERV = 1
        if (i - 1) % INTERV == 0:
            downbeats.append(beats[i])

    def add_downbeat_time_steps(mul_fact=16):
        # Subdivide each downbeat-to-downbeat interval into mul_fact steps
        # and compute a windowed average volume for each step.
        volumes = []
        volumes_diffs = []
        steps = []
        y_abs = np.abs(y)
        for i, _ in enumerate(downbeats):
            if i >= 1:
                period = downbeats[i] - downbeats[i - 1]
                for j in range(mul_fact):
                    steps.append(
                        int(round(downbeats[i - 1] + (period / mul_fact) * j)))

                    # 2-second window centred on the beat.
                    # NOTE(review): this indexes beats[i], not the subdivided
                    # step, so all mul_fact steps in an interval share one
                    # volume value — confirm whether that is intended.
                    window_size = sr * 2
                    avg = sum(y_abs[beats[i] - window_size //
                              2:beats[i] + window_size // 2]) / window_size
                    if i == 1 and j == 0:
                        volumes_diffs.append(0)
                    else:
                        diff = avg - volumes[-1]
                        volumes_diffs.append(diff)
                    volumes.append(avg)
        return steps, volumes, volumes_diffs

    sample_timestamps, volumes, volume_diffs = add_downbeat_time_steps()
    # Plotting the volumes
    plt.figure(figsize=(10, 6))
    plt.plot(sample_timestamps, volumes, marker='o')
    plt.title('Average Volume at Downbeats')
    plt.xlabel('Sample Index')
    plt.ylabel('Average Volume')
    plt.grid(True)
    plt.savefig("test_vol.png")

    plt.figure(figsize=(10, 6))
    plt.plot(sample_timestamps, volume_diffs, marker='o')
    plt.title('Average Volume Diff at Downbeats')
    plt.xlabel('Sample Index')
    plt.ylabel('Average Volume Diff')
    plt.grid(True)
    plt.savefig("test_vol_diff.png")

    return sample_timestamps, volumes, volume_diffs


def send_motor_commands_at_downbeats(downbeat_times, sr, cmds):
    """Schedule each motor command to fire at its downbeat time.

    `downbeat_times` are sample indices; dividing by `sr` converts them
    to seconds for threading.Timer.
    """
    for idx, beat_sample in enumerate(downbeat_times):
        delay_s = beat_sample / sr
        threading.Timer(delay_s, sendSerMotor, args=[cmds[idx]]).start()


def send_light_commands_at_downbeats(downbeat_times, sr, cmds):
    """Schedule each light command to fire at its downbeat time.

    Mirrors send_motor_commands_at_downbeats but targets sendSerLights.
    """
    for idx, beat_sample in enumerate(downbeat_times):
        delay_s = beat_sample / sr
        threading.Timer(delay_s, sendSerLights, args=[cmds[idx]]).start()


def clearLights():
    """Send the 'C' command, which tells the firmware to blank the strip.

    Routed through sendSerLights rather than sendSerMotor for consistency:
    'C' is a lights command, and both helpers emit identical bytes
    ("C\r\n") for a single-line payload.
    """
    sendSerLights("C")


def do_it(filename, color_means, color_vars, window_length=0.2, amplification_factor=1.2):
    """Play `filename` while streaming synchronised light and motor commands.

    `color_means`/`color_vars` are the per-palette RGB Gaussian parameters
    consumed by generateGaussianRandomDownwardLight.  `window_length` and
    `amplification_factor` are currently unused (leftovers from an earlier
    amplify-on-downbeats experiment).
    """
    # Load the audio file
    y, sr = librosa.load(filename)
    time_steps, volumes, volumes_diffs = get_sample_moments(y, sr)

    # Blank the strip before the show starts.
    clearLights()
    time.sleep(1)

    lightCmds = generateGaussianRandomDownwardLight(
        len(time_steps), volumes, color_means, color_vars)
    # NOTE(review): passes `volumes` where the callee's parameter is named
    # volumes_diffs; the diffs variant exists but is not used — confirm
    # which signal the motor should follow.
    motorCmds = generateGaussianRandomMotorMovement(
        len(time_steps), volumes)

    # Create a thread for playing the music
    play_thread = threading.Thread(target=sd.play, args=(y, sr))

    # Threads that schedule the serial commands at the sample timestamps.
    send_light_commands_thread = threading.Thread(
        target=send_light_commands_at_downbeats, args=(time_steps, sr, lightCmds))

    send_motor_commands_thread = threading.Thread(
        target=send_motor_commands_at_downbeats, args=(time_steps, sr, motorCmds))

    play_thread.start()
    send_light_commands_thread.start()
    send_motor_commands_thread.start()

    # Wait for the play thread to finish
    play_thread.join()
    send_light_commands_thread.join()
    send_motor_commands_thread.join()

# def do_it(filename, window_length=0.2, amplification_factor=.1):
#     # Load the audio file
#     y, sr = librosa.load(filename)
#     downbeat_times = get_downbeat_sample_moments(y, sr)
#     y_amplified = amplify_on_downbeats(y, sr, downbeat_times, amplification_factor=amplification_factor, window=window_length)

#     save_wav_file(y, sr, "out.wav")
#     sd.play(y_amplified, samplerate=sr)
#     sd.wait()


if __name__ == "__main__":
    # Example usage: run the full show on a local MP3 with the "melodic"
    # colour palette (means/spreads mirror the palettes defined in gui.py).
    color_means_melodic, color_vars_melodic = [[150, 10, 200], [20, 150, 200], [
        255, 0, 0], [10, 10, 10]], [[20, 2, 50], [10, 40, 50], [50, 20, 20], [2, 2, 2]]
    color_means_triumphant, color_vars_triumphant = [[250, 10, 10], [200, 10, 200], [
        255, 128, 0], [10, 10, 10]], [[10, 30, 30], [30, 40, 30], [2, 20, 1], [2, 2, 2]]
    filename = 'GirlFellTrimmed.mp3'
    # NOTE(review): do_it returns None; the `upbeats` name is vestigial.
    upbeats = do_it(filename, color_means_melodic, color_vars_melodic)

project_arduino_control.ino

Arduino
This is the arduino code that controls the motor and lights. We used this file during testing.
#include <Adafruit_NeoPixel.h>

#define DIR_PIN 19  // Direction pin
#define STEP_PIN 18 // Step pin
#define STEPS_PER_REV 200 // Change this to fit the number of steps per revolution for your motor

#define PIN_NEO_PIXEL  4  // pin 11 connected to NeoPixel
#define NUM_PIXELS     100  // The number of LEDs (pixels) on NeoPixel
#define DEBUG true        // Verbose serial logging of parsed commands
#define BLUETOOTH false   // Compile-time switch: Bluetooth SPP vs USB serial

#if BLUETOOTH
  #include "BluetoothSerial.h"
  String device_name = "ESP32-BT-Slave";

  #if !defined(CONFIG_BT_ENABLED) || !defined(CONFIG_BLUEDROID_ENABLED)
    #error Bluetooth is not enabled! Please run `make menuconfig` to and enable it
  #endif

  #if !defined(CONFIG_BT_SPP_ENABLED)
    #error Serial Bluetooth not available or not enabled. It is only available for the ESP32 chip.
  #endif

  BluetoothSerial SerialBT;
  long lastBTSend = 0;   // millis() timestamp of the last heartbeat ping
  bool BTConnected = 0;  // set true once pairing completes
#endif



Adafruit_NeoPixel strip(NUM_PIXELS, PIN_NEO_PIXEL, NEO_GRB + NEO_KHZ800);

// Last parsed motor command: signed angle (degrees) and per-step delay (us).
long ang = 0;
long currStepForAng = 0;  // reset before each rotate(); incremented there
int delaytime = 0;
// Per-pixel colour buffers, zeroed in setup().
// NOTE(review): they appear unused after initialisation — confirm.
int pixelsR[NUM_PIXELS];
int pixelsG[NUM_PIXELS];
int pixelsB[NUM_PIXELS];


void blue_green_gradient(int all_on=0) {
  strip.clear();
  for (int i = 0; i < NUM_PIXELS; i++) {
    int greenValue = map(i, 0, NUM_PIXELS / 2, 255, 0);
    int blueValue = map(i, 0, NUM_PIXELS / 2, 0, 255);
    
    strip.setPixelColor(i, strip.Color(0, greenValue, blueValue));
    strip.show();
    delay(50); 
  }
  for (int i = 0; i < NUM_PIXELS; i++) {
    strip.setPixelColor(NUM_PIXELS - i - 1, strip.Color(0, 0, 0));
    strip.show();
    delay(50); 
  }
}

// Apply each (pixel, R, G, B) assignment, then latch once with show().
// All four arrays must hold at least n_pixels entries.
void set_strip(int * pixels, int * Rs, int * Gs, int * Bs, int n_pixels) {
  for (int idx = 0; idx < n_pixels; ++idx) {
    strip.setPixelColor(pixels[idx], Rs[idx], Gs[idx], Bs[idx]);
  }
  strip.show();
}

#if BLUETOOTH
// Called when the peer requests pairing: print the confirmation code and
// mark the link as not-yet-connected until the user confirms over serial.
void BTConfirmRequestCallback(uint32_t numVal)
{
  BTConnected = false;
  Serial.println(numVal);
}

// Called when pairing finishes; records the outcome in BTConnected.
void BTAuthCompleteCallback(boolean success)
{
  if (success)
  {
    BTConnected = true;
    Serial.println("Pairing success!!");
  }
  else
  {
    Serial.println("Pairing failed, rejected by user!!");
  }
}
#endif


// One-time initialisation: zero the colour buffers, configure the stepper
// pins, open serial (and optionally Bluetooth), and start the LED strip dark.
void setup() {
  for (int i = 0; i < NUM_PIXELS; i++) {
    pixelsR[i] = 0;
    pixelsG[i] = 0;
    pixelsB[i] = 0;
  }
  pinMode(DIR_PIN, OUTPUT);
  pinMode(STEP_PIN, OUTPUT);
  Serial.begin(115200);
#if BLUETOOTH
  // Secure Simple Pairing with user confirmation via the callbacks above.
  SerialBT.enableSSP();
  SerialBT.onConfirmRequest(BTConfirmRequestCallback);
  SerialBT.onAuthComplete(BTAuthCompleteCallback);
  SerialBT.begin(device_name); //Bluetooth device name
  Serial.printf("Bluetooh Serial beginning on %s\n", device_name);
#endif
  strip.begin();
  strip.show();  // push the initial (cleared) state so the strip starts dark
  strip.clear();
}



// Main loop: read one newline-terminated command from the active serial
// transport and dispatch it ('L' = set pixels, 'C' = clear, '+'/'-' = move).
void loop() {
  #if BLUETOOTH
  // FIX: this guard was misspelled `BLUETOTTH`, so the pairing-confirmation
  // wait below was silently compiled out even when BLUETOOTH was enabled.
  // Block until the user confirms ('Y'/'y') or rejects pairing over USB.
  while (!BTConnected)
  {
    if (Serial.available())
    {
      int dat = Serial.read();
      if (dat == 'Y' || dat == 'y')
      {
        SerialBT.confirmReply(true);
      }
      else
      {
        SerialBT.confirmReply(false);
      }
    }
  }
  #endif
  if (
    #if BLUETOOTH
      SerialBT.available() > 0
    #else
      Serial.available() > 0
    #endif
   ) {
    String command = 
      #if BLUETOOTH
        SerialBT.readStringUntil('\n');
      #else
        Serial.readStringUntil('\n'); // Read the command from serial
      #endif

    if ((char) command[0] == 'L') {
      // Light command: "L" followed by 16-char groups ":ppp:rrr:ggg:bbb".
      int n_lights = (command.length() - 1) / 16;
      
      int Rs[n_lights];
      int Gs[n_lights];
      int Bs[n_lights];
      int pixels[n_lights];

      for (int j = 0; j < n_lights; j++) {
        pixels[j] = command.substring(j * 16 + 2, j * 16 + 5).toInt();
        Rs[j] = command.substring(j * 16 + 6, j * 16 + 9).toInt();
        Gs[j] = command.substring(j * 16 + 10, j * 16 + 13).toInt();
        Bs[j] = command.substring(j * 16 + 14, j * 16 + 17).toInt();
        if (DEBUG)
          Serial.printf("Setting pixel %d %d to (%d, %d, %d)\n", j, pixels[j], Rs[j], Gs[j], Bs[j]);
        
      }
      
      set_strip(pixels, Rs, Gs, Bs, n_lights);
    } else if ((char) command[0] == 'C') {
      // Clear command: blank the whole strip.
      strip.clear();
    } else if ((char) command[0] == '+' || (char) command[0] == '-') {
      // Motor command: "<+/->:<6-digit angle>:<5-digit step delay us>".
      readSerial(command, &ang, &delaytime);
      currStepForAng = 0;
      rotate();
    }
  }
  #if BLUETOOTH
  // Heartbeat ping so the host can tell the Bluetooth link is still alive.
  long now = millis();
  if (now - lastBTSend > 1000 && BTConnected) {
    SerialBT.print("ping\n");
    Serial.println("Sending Ping!");
    lastBTSend = now;
  }
  #endif
}


// ** Example manual commands**
// +:000360:02000
// ^^ +/- for direction. 6 digits for angle. 5 digits for "delay time," more delay time = more power but slower

// Parse "<+/->:<6 digit angle>:<5 digit delay>" into *ang / *delaytime.
// Example: "+:000360:02000" -> ang = 360, delaytime = 2000.
void readSerial(String command, long * ang, int * delaytime) {
  command.trim(); // Trim any whitespace

  if (command.length() >= 1 + 1 + 6 + 1 + 5) {
    int multiplier = ((char) command[0]) == '+' ? 1 : -1; // Get the direction, + or -
    
    long steps = command.substring(2, 8).toInt(); // Get the 6 digit number

    *ang = multiplier * steps;
    
    *delaytime = command.substring(9, 14).toInt();

    // FIX: use %ld for the long angle (was %d — undefined behaviour on
    // platforms where long is wider than int).
    Serial.printf("Got Command: Angle: %ld, delay: %d\n", *ang, *delaytime);
  } else {
    Serial.println("Invalid command format. Use <+/->:<6 digit number>");
  }
}

// Drive the stepper by the globally-stored `ang` degrees, pulsing STEP_PIN
// with `delaytime` microseconds per half-pulse (blocking).
void rotate() {
  // DIR_PIN is driven HIGH (1) for negative angles, LOW (0) otherwise.
  int dir = ang < 0;
  digitalWrite(DIR_PIN, dir); // Set the direction

  int ang_pos = ang < 0 ? ang * -1 : ang;
  // NOTE(review): currStepForAng is reset by the caller and incremented once
  // per rotate() call; nothing reads it here — it appears vestigial.
  currStepForAng += ang < 0 ? -1 : 1;

  // Convert degrees to motor steps (STEPS_PER_REV steps per 360 degrees).
  long n_steps = (ang_pos * STEPS_PER_REV) / 360;

  // Blocking pulse train: one HIGH/LOW pulse per step.
  for (long i = 0; i < n_steps; i++) {
    digitalWrite(STEP_PIN, HIGH);
    delayMicroseconds(delaytime);
    digitalWrite(STEP_PIN, LOW);
    delayMicroseconds(delaytime);
  }
}

Credits

Zora Che

Zora Che

1 project • 0 followers
Hana Chitsaz

Hana Chitsaz

1 project • 0 followers

Comments