johannes gan dombrowski, Vanessa Tay, Huan Shi yu
Published

Leaf Lens

Revolutionary low-cost harvest optimization for high-density indoor farming.

Intermediate · Full instructions provided

Things used in this project

Story


Schematics

Report

A PDF report on our project

Code

01_Capture

Python
Collects data from the OAK-D depth, colour and stereo channels and saves it as NumPy files for post-processing.
####################################################################
#This script collects data from the depth, color and stereo channels
#and saves them as numpy files for post-processing
####################################################################

from pathlib import Path
import cv2
import depthai as dai
import datetime
import numpy as np
import time
# Start defining a pipeline
pipeline = dai.Pipeline()

####################################################################
# Define Color Camera Pipeline
####################################################################
# Define a source - color camera
camRgb = pipeline.createColorCamera()
camRgb.initialControl.setSharpness(0)     # range: 0..4, default: 1
camRgb.initialControl.setLumaDenoise(0)   # range: 0..4, default: 1
camRgb.initialControl.setChromaDenoise(4) # range: 0..4, default: 1
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setFps(10)

# Create RGB output
xoutRgb = pipeline.createXLinkOut()
xoutRgb.setStreamName("rgb")
camRgb.video.link(xoutRgb.input)

####################################################################
# Define Mono Right Pipeline
####################################################################
# Define a source - mono (grayscale) camera - Right
camRight = pipeline.createMonoCamera()
camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
camRight.setFps(10)

# Create output
xoutRight = pipeline.createXLinkOut()
xoutRight.setStreamName("right")
camRight.out.link(xoutRight.input)

####################################################################
# Define Mono Left Pipeline
####################################################################
# Define a source - mono (grayscale) camera - Left
camLeft = pipeline.createMonoCamera()
camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
camLeft.setFps(10)
# Create output
xoutLeft = pipeline.createXLinkOut()
xoutLeft.setStreamName("left")
camLeft.out.link(xoutLeft.input)

####################################################################
# Define Depth Pipeline
####################################################################
# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = True
# Better handling for occlusions:
lr_check = True
# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth = pipeline.createStereoDepth()
#depth.setConfidenceThreshold(230)
#depth.setOutputDepth(False)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7 # For depth filtering
#depth.setMedianFilter(median)
depth.setLeftRightCheck(lr_check)

# Normal disparity values range from 0..95, will be used for normalization
max_disparity = 95

if extended_disparity: max_disparity *= 2 # Double the range
depth.setExtendedDisparity(extended_disparity)

if subpixel: max_disparity *= 32 # 5 fractional bits, x32
depth.setSubpixel(subpixel)

# When we get disparity to the host, we will multiply all values with the multiplier
# for better visualization
multiplier = 255 / max_disparity
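# Note: `multiplier` is not applied anywhere below, because this script saves the raw
# frames as-is; it is only needed when visualizing disparity on the host.
# A typical use would be, e.g. (illustrative, not part of the original script):
#   vis = (frameDepth * multiplier).astype(np.uint8)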

camLeft.out.link(depth.left)
camRight.out.link(depth.right)
# Create output
xout = pipeline.createXLinkOut()
xout.setStreamName("disparity")
depth.disparity.link(xout.input)


####################################################################
# Connect to device
####################################################################
# Pipeline is defined, now we can connect to the device
Path('./data').mkdir(parents=True, exist_ok=True)     # Make sure the destination path used by np.save below is present
with dai.Device(pipeline) as device:

    # Start pipeline (deprecated on newer depthai releases, where passing the pipeline to dai.Device() already starts it)
    device.startPipeline()
    
    time.sleep(2.5) #allow autofocus and autoexposure to settle

    # Output queues will be used to get the rgb, mono and disparity frames from the outputs defined above
    qRgb = device.getOutputQueue(name="rgb", maxSize=5, blocking=False)
    qRight = device.getOutputQueue(name="right", maxSize=5, blocking=False)
    qLeft = device.getOutputQueue(name="left", maxSize=5, blocking=False)
    qDepth = device.getOutputQueue(name="disparity", maxSize=5, blocking=False)

    inRgb = None
    inRight = None
    inLeft = None
    inDepth = None
    # Pull and discard 50 frames first so the streams and auto-exposure settle before the final frames are saved
    for i in range(0,50):

        inRgb = qRgb.get()
        # Data is originally represented as a flat 1D array, it needs to be converted into HxW form
        if (inRgb is not None) :
            frameRgb = inRgb.getCvFrame()
            #np.save(f"/home/pi/raspi-oak/data/{filename}_rgb.npy",frameRgb)          
        
        inRight = qRight.get()  # Blocking call, will wait until a new data has arrived
        if (inRight is not None) :
            frameRight = inRight.getCvFrame()
            #np.save(f"/home/pi/raspi-oak/data/{filename}_monor.npy",frameRight)
        
        inLeft = qLeft.get()  # Blocking call, will wait until new data has arrived
        if (inLeft is not None) :
            frameLeft = inLeft.getCvFrame()
            #np.save(f"/home/pi/raspi-oak/data/{filename}_monol.npy",frameLeft)
        
        inDepth = qDepth.get() 
        if (inDepth is not None) :
            frameDepth = inDepth.getFrame()
            #np.save(f"/home/pi/raspi-oak/data/{filename}_depth.npy",frameDepth)

    filename = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    inRgb = qRgb.get()
    # Data is originally represented as a flat 1D array, it needs to be converted into HxW form
    if (inRgb is not None) :
        frameRgb = inRgb.getCvFrame()
        np.save(f"./data/{filename}_rgb.npy",frameRgb)          
    
    inRight = qRight.get()  # Blocking call, will wait until a new data has arrived
    if (inRight is not None) :
        frameRight = inRight.getCvFrame()
        np.save(f"./data/{filename}_monor.npy",frameRight)
    
    inLeft = qLeft.get()  # Blocking call, will wait until new data has arrived
    if (inLeft is not None) :
        frameLeft = inLeft.getCvFrame()
        np.save(f"./data/{filename}_monol.npy",frameLeft)
    
    inDepth = qDepth.get() 
    if (inDepth is not None) :
        frameDepth = inDepth.getFrame()
        np.save(f"./data/{filename}_depth.npy",frameDepth)

02_Show

Python
Performs the following steps:
1. Reads the stored NumPy data
2. Plays them back in sequence
3. Calculates the Depth_SUM of each depth snapshot
4. Saves the filenames and corresponding Depth_SUM values in a CSV file
#####################################################
# This script reads the stored numpy data and
# 1. plays them back in sequence 
# 2. calculates the Depth_SUM of each depth snapshot
# 3. saves the filenames and corresponding Depth_SUM in a csv file
#####################################################

import numpy as np
import cv2
import os
import csv
from pathlib import Path
Path('./img').mkdir(parents=True, exist_ok=True)     # Make sure the destination path is present

#font settings
#bottomLeftCornerOfText = (40,520)
#bottomLeftCornerOfText_train_status = (20,20)
#bottomLeftCornerOfText_assy_status = (20,40)
#font = cv2.FONT_HERSHEY_SIMPLEX
#fontScale = 0.7
#fontColor = (255,255,255)
#lineType = 2

#text background
#start_point = (0, 480) # Start coordinate represents the top left corner of rectangle
#end_point = (960, 540) # Ending coordinate represents the bottom right corner of rectangle
#color = (0, 0, 0) # Black color in BGR
#thickness = -1 # Line thickness in px ( -1 = fill)
#train status display box (colour and thickness are used by the status rectangle below)
#start_point_train_status = (20, 20)
#end_point_train_status = (100, 80)
color_train_status = (0,0,0) # Black color in BGR
thickness_train_status = -1 # -1 = fill

#####################################################
# specify data directory for reading 
directorystr = 'data' #directory where npy files are stored
#####################################################
entries = os.listdir(f'./{directorystr}/')
entries.sort()
print(entries) #list all the files in directory
count = 0
#dimensions for image scaling
width = 1280
height = 760
dim = (width, height)

depthlist = []
fnamelist =[]
for f_name in entries:
    print(f'{count}:{f_name}')

    if (count == 0):
        print(f'Plot {count}:{f_name}')
        fstrfull = f'./{directorystr}/{f_name}'
        f = open(fstrfull,'rb')
        fnamelist.append(f_name) #add filename to list
        depth_data = np.load(f)
        #scale depth data for plotting 0-255
        depth_sum = np.sum(depth_data)
        depthlist.append(depth_sum) #add depth sum to list
        depth_data = (depth_data/3040*255).astype(np.uint8) # 3040 = 95 px max disparity x 32 subpixel levels
        depth_data = cv2.applyColorMap(depth_data, cv2.COLORMAP_JET)
        print(f_name)
        print("shape=",depth_data.shape)
        print("max=", np.amax(depth_data))
        depth_rsz = cv2.resize(depth_data, dim, interpolation = cv2.INTER_AREA)

        #cv2.rectangle(depth_rsz, start_point, end_point, color, thickness)
        #displaystr = f'{f_name} : Depth SUM = {depth_sum:,}'
        #cv2.putText(depth_rsz, displaystr , bottomLeftCornerOfText ,font , fontScale , fontColor , lineType)

 #   if (count == 1):
 #       print(f'Plot {count}:{f_name}')
 #       fstrfull = "./data/"+f_name
 #       f = open(fstrfull,'rb')
#        monol_data = np.load(f)
#        print(f_name)
#        print("shape=",monol_data.shape)
#        monol_rsz = cv2.resize(monol_data, dim, interpolation = cv2.INTER_AREA)
#        monol_rsz = cv2.cvtColor(monol_rsz,cv2.COLOR_GRAY2RGB)
#        cv2.rectangle(monol_rsz, start_point, end_point, color, thickness)
#        displaystr = f'{f_name} : MONO LEFT'
#        cv2.putText(monol_rsz, displaystr , bottomLeftCornerOfText ,font , fontScale , fontColor , lineType)

#    if (count == 2):
#        print(f'Plot {count}:{f_name}')
#        fstrfull = "./data/"+f_name
#        f = open(fstrfull,'rb')
#        monor_data = np.load(f)
#        print(f_name)
#        print("shape=",monor_data.shape)
#        monor_rsz = cv2.resize(monor_data, dim, interpolation = cv2.INTER_AREA)
#        monor_rsz = cv2.cvtColor(monor_rsz,cv2.COLOR_GRAY2RGB)
#        cv2.rectangle(monor_rsz, start_point, end_point, color, thickness)
#        displaystr = f'{f_name} : MONO RIGHT'
#        cv2.putText(monor_rsz, displaystr , bottomLeftCornerOfText ,font , fontScale , fontColor , lineType)

    if (count == 3):
        fstrfull = "./data/"+f_name
        f = open(fstrfull,'rb')
        rgb_data = np.load(f)
        print(f_name)
        print("shape=",rgb_data.shape)
        print("max=", np.amax(rgb_data))
        rgb_rsz = cv2.resize(rgb_data, dim, interpolation = cv2.INTER_AREA)
        #cv2.rectangle(rgb_rsz, start_point, end_point, color, thickness)
        displaystr1 = f'{f_name} : RGB'
        #cv2.putText(rgb_rsz, displaystr1 , bottomLeftCornerOfText ,font , fontScale , fontColor , lineType)


#####################################################
# the depth_sum value is used to differentiate part of the train 
#####################################################

        if (depth_sum > 500000000): #train in front of camera
            cv2.rectangle(rgb_rsz, (0,0), (350,50), color_train_status, thickness_train_status)
            displaystr2 = 'TRAIN IN RANGE'
            #cv2.putText(rgb_rsz, displaystr2 , bottomLeftCornerOfText_train_status ,font , fontScale , fontColor , lineType)
            print('Depth Sum =',depth_sum, ' Train in range')
        if (depth_sum > 1000000000): #CCD (shoe assembly) in front of camera
            displaystr2 = 'SHOE ASSEMBLY IN RANGE'
            #cv2.putText(rgb_rsz, displaystr2 , bottomLeftCornerOfText_assy_status ,font , fontScale , fontColor , lineType)
            print('Depth Sum =',depth_sum, ' Shoe Assembly in range')
        depth_sum = 0

        #top_row = np.concatenate((rgb_rsz, depth_rsz), axis=1)
        #bottom_row = np.concatenate((monol_rsz, monor_rsz), axis=1)
        #image_concat = np.concatenate((top_row, bottom_row), axis=0)
        cv2.imshow("combine", rgb_rsz)
        imgfname = './img/' + f_name + '.png'
        cv2.imwrite(imgfname, rgb_rsz) #save image to disk
        cv2.waitKey(1) #modify the value to control playback speed

    count = (count+1) % 4

#####################################################
# saves the filenames and depth_sum in a csv file
#####################################################

writelist=zip(fnamelist,depthlist)
depthfilename = directorystr+'.csv'
with open(depthfilename, 'w') as fw:
    writer = csv.writer(fw,delimiter=',',lineterminator='\r')
    writer.writerows(writelist)
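The CSV produced above contains one `filename,depth_sum` row per depth snapshot. A short sketch (illustrative only, not part of the project code) for reading it back and plotting how the depth sum evolves across the captured snapshots:

import csv
import matplotlib.pyplot as plt

stamps, sums = [], []
with open('data.csv') as fr:                  # directorystr + '.csv' from above
    for fname, depth_sum in csv.reader(fr):
        stamps.append(fname.split('_')[0])    # timestamp prefix of the .npy file
        sums.append(float(depth_sum))

plt.plot(stamps, sums, 'bo:')
plt.xticks(rotation=90)
plt.xlabel('Snapshot timestamp')
plt.ylabel('Depth SUM')
plt.tight_layout()
plt.show()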

03_Analyse

Python
Jupyter notebook for:
1. Analysing growth of foliage volume
2. Plotting data collected from the farm
# -*- coding: utf-8 -*-
"""RaspiOakFarm.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1DkavLOvESRatwVPYDUwM-qV-28JtsJDh
"""

#21 April TOP & FRONT (URLs quoted so the shell does not treat '&' as a command separator)
!wget --content-disposition "https://www.dropbox.com/scl/fi/5aeta74tuh21rhj3hqiv3/Data21April_Top.zip?rlkey=9wt0htoh2k9duizp5g23f05tp&dl=0"
!unzip -j Data21April_Top.zip -d ./data_top/
!wget --content-disposition "https://www.dropbox.com/scl/fi/y49v1x3sf4347hn6dxo1x/Data21April_Front.zip?rlkey=6megcxrcafvr49rsrmp3q89bx&dl=0"
!unzip -j Data21April_Front.zip -d ./data_front/



#4 Aug TOP
!wget --content-disposition "https://www.dropbox.com/scl/fi/ri3wuicv96y5dyqaidiab/R5_2023-08-04_24_TOP.zip?rlkey=4wcx7oc33wexykr6x39v03sjs&dl=0"
!unzip -j R5_2023-08-04_24_TOP.zip -d ./data_0804_front/
#4 Aug FRONT
!wget --content-disposition "https://www.dropbox.com/scl/fi/wag9c3syx5da0fk81t5ny/R5_2023-08-04_24_FRONT.zip?rlkey=uwxehcuaf4jwimqs9l1ehdn3c&dl=0"
!unzip -j R5_2023-08-04_24_FRONT.zip -d ./data_front/



import numpy as np
import cv2
import os
import csv
from pathlib import Path

#font settings for labeling images
bottomLeftCornerOfText = (40,520)
bottomLeftCornerOfText_train_status = (20,20)
bottomLeftCornerOfText_assy_status = (20,40)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.7
fontColor = (255,255,255)
lineType = 2

#text background
start_point = (0, 480) # Start coordinate represents the top left corner of rectangle
end_point = (960, 540) # Ending coordinate represents the bottom right corner of rectangle
color = (0, 0, 0) # Black color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)

"""# Let's examine a depth file"""

fstrfull = './data_top/2023-04-17-16-00-21_depth.npy'

f = open(fstrfull,'rb')
data_disparity = np.load(f)

data_disparity[0,1:10] #show a few values (indices 1-9) from the first row

data_disparity.max()

data_disparity.min()

rows, cols = data_disparity.shape
print(f'Total {rows} rows and {cols} columns.')

"""Using depth as a segmentation tool"""

data_disparity2 = np.zeros(shape=(rows, cols))
for row in range (0,rows):
    for col in range(0,cols):
        if (data_disparity[row,col] < 301) or (data_disparity[row,col] > 450):
            data_disparity2[row,col] = 1
        else:
            data_disparity2[row,col] = ((data_disparity[row,col] - 300) / 150 * 255) #normalizing

data_disparity2 = data_disparity2.astype(np.uint8)
data_disparity2 = cv2.applyColorMap(data_disparity2, cv2.COLORMAP_JET)
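# A vectorized sketch of the same band-pass segmentation (not part of the
# original notebook): keep disparities in the 301-450 band, rescale them to
# 0-255 exactly as in the loop above, and set out-of-band pixels to 1.
disp_f = data_disparity.astype(np.float32)
seg = np.where((disp_f >= 301) & (disp_f <= 450),
               (disp_f - 300) / 150 * 255,   # same normalization as the loop
               1).astype(np.uint8)
seg = cv2.applyColorMap(seg, cv2.COLORMAP_JET)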

from google.colab.patches import cv2_imshow
cv2_imshow(data_disparity)

from google.colab.patches import cv2_imshow
cv2_imshow(data_disparity2)
#cv2.imwrite('export.png', data_disparity2) #saves as PNG

#mid row bottom line
start_point = (0, 360) # Start coordinate represents the top left corner of rectangle
end_point = (1280, 361) # Ending coordinate represents the bottom right corner of rectangle
color = (255, 255, 255) # White color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)
cv2.rectangle(data_disparity2, start_point, end_point, color, thickness)
#mid row top line
start_point = (0, 200) # Start coordinate represents the top left corner of rectangle
end_point = (1280, 201) # Ending coordinate represents the bottom right corner of rectangle
color = (255, 255, 255) # White color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)
cv2.rectangle(data_disparity2, start_point, end_point, color, thickness)

#top row, top line
start_point = (0, 60) # Start coordinate represents the top left corner of rectangle
end_point = (1280, 61) # Ending coordinate represents the bottom right corner of rectangle
color = (255, 255, 255) # White color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)
cv2.rectangle(data_disparity2, start_point, end_point, color, thickness)
#top row, bottom line
start_point = (0, 190) # Start coordinate represents the top left corner of rectangle
end_point = (1280, 191) # Ending coordinate represents the bottom right corner of rectangle
color = (255, 255, 255) # White color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)
cv2.rectangle(data_disparity2, start_point, end_point, color, thickness)

#bottom row, top line
start_point = (0, 520) # Start coordinate represents the top left corner of rectangle
end_point = (1280, 521) # Ending coordinate represents the bottom right corner of rectangle
color = (255, 255, 255) # White color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)
cv2.rectangle(data_disparity2, start_point, end_point, color, thickness)
#bottom row, bottom line
start_point = (0, 719) # Start coordinate represents the top left corner of rectangle
end_point = (1280, 720) # Ending coordinate represents the bottom right corner of rectangle
color = (255, 255, 255) # White color in BGR
thickness = -1 # Line thickness in px ( -1 = fill)
cv2.rectangle(data_disparity2, start_point, end_point, color, thickness)

cv2_imshow(data_disparity2)

"""Calculate the depth distribution
https://docs.luxonis.com/projects/api/en/latest/components/nodes/stereo_depth/#calculate-depth-using-disparity-map
"""

data_depth = np.zeros(shape=(rows, cols))
for row in range (0,rows):
    for col in range(0,cols):
        if (data_disparity[row,col] > 0) :
            data_depth[row,col] =  883.15 * 7.5 / data_disparity[row,col] #from camera specs
print(f'Depth ranges from {data_depth.min()} to {data_depth.max()}')
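# A vectorized equivalent of the conversion above (a sketch, not part of the
# original notebook): depth = focal_length_px * baseline / disparity, with
# zero-disparity pixels left at 0 to avoid division by zero.  For example, a
# stored disparity value of 331 maps to 883.15 * 7.5 / 331 ~= 20.
disp_f = data_disparity.astype(np.float32)
data_depth_vec = np.zeros_like(disp_f)
np.divide(883.15 * 7.5, disp_f, out=data_depth_vec, where=disp_f > 0)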

row_mid = []
for row in range (200,360):
    for col in range(0,cols):
        if (data_depth[row,col] < 2) or (data_depth[row,col] > 60):
            row_mid.append(0)
        else: #keep depths between 2 and 60 cm
            row_mid.append(data_depth[row,col])
#plt.hist(row_mid, bins=range(0, 60 + binwidth, binwidth))

row_bottom = []
for row in range (520,720):
    for col in range(0,cols):
        if (data_depth[row,col] < 2) or (data_depth[row,col] > 60):
            row_bottom.append(0)
        else: #keep depths between 2 and 60 cm
            row_bottom.append(data_depth[row,col])
#plt.hist(row_bottom, bins=range(0, 60 + binwidth, binwidth))

row_top = []
for row in range (60,190):
    for col in range(0,cols):
        if (data_depth[row,col] < 2) or (data_depth[row,col] > 60):
            row_top.append(0)
        else: #keep depths between 2 and 60 cm
            row_top.append(data_depth[row,col])
#plt.hist(row_top, bins=range(0, 60 + binwidth, binwidth))

import matplotlib.pyplot as plt
binwidth=1
plt.hist(data_depth.flatten(), bins=range(1, 60 + binwidth, binwidth))
plt.xlabel('Depth (cm)')
plt.ylabel('Number of pixels (counts)')
plt.title('Distribution of depth data')

plt.hist(row_top, bins=range(1, 60 + binwidth, binwidth),label='top', alpha=0.5)
plt.hist(row_mid, bins=range(1, 60 + binwidth, binwidth),label='mid' , alpha=0.5)
plt.hist(row_bottom, bins=range(1, 60 + binwidth, binwidth),label='bottom', alpha=0.5)
plt.xlabel('Depth (cm)')
plt.ylabel('Number of pixels (counts)')
plt.title('Distribution of depth data for top, middle and bottom rows')
plt.legend()

"""# Compare plant heights at same time every day using top camera"""

#try out different time of day - what is the best time when height is max.
fstrfull6 = './data_top/2023-04-17-16-00-21_depth.npy'
fstrfull7 = './data_top/2023-04-18-16-00-19_depth.npy'
fstrfull8 = './data_top/2023-04-19-16-00-18_depth.npy'
fstrfull9 = './data_top/2023-04-20-16-00-22_depth.npy'
fstrfull10 = './data_top/2023-04-21-16-00-21_depth.npy'

fstrfull6rgb = './data_top/2023-04-17-16-00-21_rgb.npy'
fstrfull7rgb = './data_top/2023-04-18-16-00-19_rgb.npy'
fstrfull8rgb = './data_top/2023-04-19-16-00-18_rgb.npy'
fstrfull9rgb = './data_top/2023-04-20-16-00-22_rgb.npy'
fstrfull10rgb = './data_top/2023-04-21-16-00-21_rgb.npy'

label6 = '17Apr'
label7 = '18Apr'
label8 = '19Apr'
label9 = '20Apr'
label10 = '21Apr'

f6 = open(fstrfull6,'rb')
data6_disparity = np.load(f6)

f7 = open(fstrfull7,'rb')
data7_disparity = np.load(f7)

f8 = open(fstrfull8,'rb')
data8_disparity = np.load(f8)

f9 = open(fstrfull9,'rb')
data9_disparity = np.load(f9)

f10 = open(fstrfull10,'rb')
data10_disparity = np.load(f10)
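# A compact alternative to the five loads above (a sketch, not part of the
# original notebook): collect the disparity arrays in a dict keyed by day label,
# which scales more easily if further capture days are added.
day_files = {label6: fstrfull6, label7: fstrfull7, label8: fstrfull8,
             label9: fstrfull9, label10: fstrfull10}
disparity_by_day = {}
for day, path in day_files.items():
    with open(path, 'rb') as fh:
        disparity_by_day[day] = np.load(fh)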

rows, cols = data10_disparity.shape

binwidth = 1
plt.hist(data6_disparity.flatten(), bins=range(1, 1000 + binwidth, binwidth))
plt.xlabel('Disparity')
plt.ylabel('Number of pixels (counts)')

data6_depth = np.zeros(shape=(rows, cols))
data7_depth = np.zeros(shape=(rows, cols))
data8_depth = np.zeros(shape=(rows, cols))
data9_depth = np.zeros(shape=(rows, cols))
data10_depth = np.zeros(shape=(rows, cols))

data6_height = np.zeros(shape=(rows, cols))
data7_height = np.zeros(shape=(rows, cols))
data8_height = np.zeros(shape=(rows, cols))
data9_height = np.zeros(shape=(rows, cols))
data10_height = np.zeros(shape=(rows, cols))

import math
x0 = 5
y0 = 50
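# Geometry note (an interpretation of the code below, not stated in the original
# notebook): x0 and y0 appear to be the camera's horizontal offset and its mounting
# height above the growing tray, in the same units as the computed depth.  Each
# in-range pixel's height is then estimated with Pythagoras,
#   height = y0 - sqrt(depth^2 - x0^2),
# i.e. the vertical drop from camera to leaf subtracted from the mounting height.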

for row in range (0,rows):
    for col in range(0,cols):
        if (data6_disparity[row,col] > 0 ) :
            data6_depth[row,col] =  883.15 * 7.5 / data6_disparity[row,col]
            if (data6_depth[row,col] > 10 and data6_depth[row,col] < 50):
              data6_height[row,col] = y0 - math.sqrt(data6_depth[row,col]*data6_depth[row,col] - x0*x0)
            else:
              data6_height[row,col] = 0


        if (data7_disparity[row,col] > 0 ) :
            data7_depth[row,col] =  883.15 * 7.5 / data7_disparity[row,col]
            if (data7_depth[row,col] > 10 and data7_depth[row,col] < 50):
              data7_height[row,col] = y0 - math.sqrt(data7_depth[row,col]*data7_depth[row,col] - x0*x0)
            else:
              data7_height[row,col] = 0


        if (data8_disparity[row,col] > 0 ) :
            data8_depth[row,col] =  883.15 * 7.5 / data8_disparity[row,col]
            if (data8_depth[row,col] > 10 and data8_depth[row,col] < 50) :
              data8_height[row,col] = y0 - math.sqrt(data8_depth[row,col]*data8_depth[row,col] - x0*x0)
            else :
              data8_height[row,col] = 0


        if (data9_disparity[row,col] > 0) :
            data9_depth[row,col] =  883.15 * 7.5 / data9_disparity[row,col]
            if (data9_depth[row,col] > 10  and data9_depth[row,col] < 50) :
              data9_height[row,col] = y0 - math.sqrt(data9_depth[row,col]*data9_depth[row,col] - x0*x0)
            else :
              data9_height[row,col] = 0


        if (data10_disparity[row,col] > 0 ) :
            data10_depth[row,col] =  883.15 * 7.5 / data10_disparity[row,col]
            if (data10_depth[row,col] > 10 and data10_depth[row,col] < 50) :
              data10_height[row,col] = y0 - math.sqrt(data10_depth[row,col]*data10_depth[row,col] - x0*x0)
            else:
              data10_height[row,col] = 0

binwidth = 1
plt.hist(data6_depth.flatten(), bins=range(1, 100 + binwidth, binwidth))
plt.xlabel('Depth (a.u.)')
plt.ylabel('Number of pixels (counts)')

binwidth = 1
plt.hist(data6_height.flatten(), bins=range(1, 100 + binwidth, binwidth))
plt.xlabel('Height (a.u.)')
plt.ylabel('Number of pixels (counts)')

data6_depth_plotdata = np.zeros(shape=(rows, cols))
data6_height_plotdata = np.zeros(shape=(rows, cols))
count6 = 0
for row in range (0,rows):
    for col in range(0,cols):
        if (data6_depth[row,col] < 2) or (data6_depth[row,col] > 60):
            data6_depth_plotdata[row,col] =  0
        else: #keep depth values between 2 and 60
            data6_depth_plotdata[row,col] =  (data6_depth[row,col] - 2) / 18 * 255
            data6_height_plotdata[row,col] = y0 - math.sqrt(data6_depth[row,col]*data6_depth[row,col] - x0*x0)
            count6+=data6_height_plotdata[row,col]
data6_depth_plotdata = data6_depth_plotdata.astype(np.uint8)
data6_depth_plotdata = cv2.applyColorMap(data6_depth_plotdata, cv2.COLORMAP_JET)
plt.figure(figsize=[8,6])
plt.imshow(data6_depth_plotdata)
plt.text(50,50, label6, c='white', fontsize='x-large', fontweight='bold')
print(count6)

data6_depth_plotdata = np.zeros(shape=(rows, cols))
data6_height_plotdata = np.zeros(shape=(rows, cols))
count6 = 0
for row in range (0,rows):
    for col in range(0,cols):
        if (row < 200) or (row > 350) or (data6_depth[row,col] < 2) or (data6_depth[row,col] > 20):
            data6_depth_plotdata[row,col] =  0
        else: #keep depth values between 2 and 20
            data6_depth_plotdata[row,col] =  (data6_depth[row,col] - 2) / 18 * 255
            data6_height_plotdata[row,col] = y0 - math.sqrt(data6_depth[row,col]*data6_depth[row,col] - x0*x0)
            count6+=data6_height_plotdata[row,col]
data6_depth_plotdata = data6_depth_plotdata.astype(np.uint8)
data6_depth_plotdata = cv2.applyColorMap(data6_depth_plotdata, cv2.COLORMAP_JET)
plt.figure(figsize=[8,6])
plt.imshow(data6_depth_plotdata)
plt.text(50,50, label6 + ':' + str(count6), c='white', fontsize='x-large', fontweight='bold')
print(count6)

f = open(fstrfull6rgb,'rb')
rgb6_data = np.load(f)
plt.figure(figsize=[8,6])
plt.imshow(rgb6_data)
plt.text(50,100, label6 + ':' + str(count6), c='white', fontsize='x-large', fontweight='bold')

binwidth = 1
plt.hist(data7_depth.flatten(), bins=range(1, 100 + binwidth, binwidth))
plt.xlabel('Depth (cm)')
plt.ylabel('Number of pixels (counts)')

data7_depth_plotdata = np.zeros(shape=(rows, cols))
data7_height_plotdata = np.zeros(shape=(rows, cols))
count7 = 0
for row in range (0,rows):
    for col in range(0,cols):
        if (row < 200) or (row > 350) or (data7_depth[row,col] < 2) or (data7_depth[row,col] > 20):
            data7_depth_plotdata[row,col] =  0
        else: #keep depth values between 2 and 20
            data7_depth_plotdata[row,col] =  (data7_depth[row,col] - 2) / 18 * 255
            data7_height_plotdata[row,col] = y0 - math.sqrt(data7_depth[row,col]*data7_depth[row,col] - x0*x0)
            count7+=data7_height_plotdata[row,col]
data7_depth_plotdata = data7_depth_plotdata.astype(np.uint8)
data7_depth_plotdata = cv2.applyColorMap(data7_depth_plotdata, cv2.COLORMAP_JET)
plt.figure(figsize=[8,6])
plt.imshow(data7_depth_plotdata)
plt.text(50,50, label7 + ':' + str(count7), c='white', fontsize='x-large', fontweight='bold')
print(count7)

f = open(fstrfull7rgb,'rb')
rgb7_data = np.load(f)
plt.figure(figsize=[8,6])
plt.imshow(rgb7_data)
plt.text(50,100, label7 + ':' + str(count7), c='white', fontsize='x-large', fontweight='bold')

binwidth = 1
plt.hist(data8_depth.flatten(), bins=range(1, 100 + binwidth, binwidth))
plt.xlabel('Depth (cm)')
plt.ylabel('Number of pixels (counts)')

data8_depth_plotdata = np.zeros(shape=(rows, cols))
data8_height_plotdata = np.zeros(shape=(rows, cols))
count8 = 0
for row in range (0,rows):
    for col in range(0,cols):
        if (row < 200) or (row > 350) or (data8_depth[row,col] < 2) or (data8_depth[row,col] > 20):
            data8_depth_plotdata[row,col] =  0
        else: #keep depth values between 2 and 20
            data8_depth_plotdata[row,col] =  (data8_depth[row,col] - 2) / 18 * 255
            data8_height_plotdata[row,col] = y0 - math.sqrt(data8_depth[row,col]*data8_depth[row,col] - x0*x0)
            count8+=data8_height_plotdata[row,col]
data8_depth_plotdata = data8_depth_plotdata.astype(np.uint8)
data8_depth_plotdata = cv2.applyColorMap(data8_depth_plotdata, cv2.COLORMAP_JET)
plt.figure(figsize=[8,6])
plt.imshow(data8_depth_plotdata)
plt.text(50,50, label8 + ':' + str(count8), c='white', fontsize='x-large', fontweight='bold')
print(count8)

f = open(fstrfull8rgb,'rb')
rgb8_data = np.load(f)
plt.figure(figsize=[8,6])
plt.imshow(rgb8_data)
plt.text(50,100, label8 + ':' + str(count8), c='white', fontsize='x-large', fontweight='bold')

binwidth = 1
plt.hist(data9_depth.flatten(), bins=range(1, 100 + binwidth, binwidth))
plt.xlabel('Depth (cm)')
plt.ylabel('Number of pixels (counts)')

data9_depth_plotdata = np.zeros(shape=(rows, cols))
data9_height_plotdata = np.zeros(shape=(rows, cols))
count9 = 0
for row in range (0,rows):
    for col in range(0,cols):
        if (row < 200) or (row > 350) or (data9_depth[row,col] < 2) or (data9_depth[row,col] > 20):
            data9_depth_plotdata[row,col] =  0
        else: #keep depth values between 2 and 20
            data9_depth_plotdata[row,col] =  (data9_depth[row,col] - 2) / 18 * 255
            data9_height_plotdata[row,col] = y0 - math.sqrt(data9_depth[row,col]*data9_depth[row,col] - x0*x0)
            count9+=data9_height_plotdata[row,col]
data9_depth_plotdata = data9_depth_plotdata.astype(np.uint8)
data9_depth_plotdata = cv2.applyColorMap(data9_depth_plotdata, cv2.COLORMAP_JET)
plt.figure(figsize=[8,6])
plt.imshow(data9_depth_plotdata)
plt.text(50,50, label9 + ':' + str(count9), c='white', fontsize='x-large', fontweight='bold')
print(count9)

f = open(fstrfull9rgb,'rb')
rgb9_data = np.load(f)
plt.figure(figsize=[8,6])
plt.imshow(rgb9_data)
plt.text(50,100, label9 + ':' + str(count9), c='white', fontsize='x-large', fontweight='bold')

data10_depth_plotdata = np.zeros(shape=(rows, cols))
data10_height_plotdata = np.zeros(shape=(rows, cols))
count10=0
for row in range (0,rows):
    for col in range(0,cols):
        if (row < 200) or (row > 350) or (data10_depth[row,col] < 2) or (data10_depth[row,col] > 20):
            data10_depth_plotdata[row,col] =  0
        else: #keep depth values between 2 and 20
            data10_depth_plotdata[row,col] =  (data10_depth[row,col] - 2) / 18 * 255
            data10_height_plotdata[row,col] = y0 - math.sqrt(data10_depth[row,col]*data10_depth[row,col] - x0*x0)
            count10+=data10_height_plotdata[row,col]
data10_depth_plotdata = data10_depth_plotdata.astype(np.uint8)
data10_depth_plotdata = cv2.applyColorMap(data10_depth_plotdata, cv2.COLORMAP_JET)
plt.figure(figsize=[8,6])
plt.imshow(data10_depth_plotdata)
plt.text(50,50, label10 + ':' + str(count10), c='white', fontsize='x-large', fontweight='bold')
print(count10)

f = open(fstrfull10rgb,'rb')
rgb10_data = np.load(f)
plt.figure(figsize=[8,6])
plt.imshow(rgb10_data)
plt.text(50,100, label10 + ':' + str(count10), c='white', fontsize='x-large', fontweight='bold')

counts = [count6, count7, count8, count9, count10]
label = [label6, label7, label8, label9, label10]

plt.plot(label,counts,'bo:')

cv2_imshow(rgb10_data)
#cv2.imwrite('rgb10.png', rgb10_data)

counts[0]

#normalized plot
ncounts = np.zeros(len(counts))
for i in range(0,len(ncounts)):
    ncounts[i] = counts[i]/counts[0]
plt.plot(label,ncounts,'bo:')
plt.ylabel('Normalized ROI area')
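The per-day cells above repeat the same region-of-interest computation by hand. As a rough consolidation, the sketch below wraps that foliage metric (sum of estimated leaf heights over the middle row band) in a single helper. The helper name is hypothetical and the keyword defaults are simply copied from the cells above, so treat this as illustrative rather than as the project's reference implementation; the defaults would need to be re-checked for a different camera or rack geometry.

def foliage_metric(disparity, row_min=200, row_max=350,
                   depth_min=2.0, depth_max=20.0,
                   focal_px=883.15, baseline_cm=7.5, x0=5.0, y0=50.0):
    """Sum of estimated leaf heights inside a horizontal band of the depth map."""
    disp = disparity.astype(np.float32)
    depth = np.zeros_like(disp)
    np.divide(focal_px * baseline_cm, disp, out=depth, where=disp > 0)
    band = np.zeros(depth.shape[0], dtype=bool)
    band[row_min:row_max + 1] = True                     # middle row band only
    valid = band[:, None] & (depth >= depth_min) & (depth <= depth_max)
    height = np.where(valid, y0 - np.sqrt(np.maximum(depth**2 - x0**2, 0.0)), 0.0)
    return height.sum()

# Example: recompute the daily counts and the normalized growth curve.
daily = {label6: data6_disparity, label7: data7_disparity, label8: data8_disparity,
         label9: data9_disparity, label10: data10_disparity}
metrics = [foliage_metric(d) for d in daily.values()]
plt.plot(list(daily.keys()), [m / metrics[0] for m in metrics], 'bo:')
plt.ylabel('Normalized ROI area')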

Credits

johannes gan dombrowski

Vanessa Tay

Huan Shi yu