Good day
I’m upgrading a 3D-printing testbench with a high-speed camera, and I want to use the camera to determine the motion of the filament (x-axis). I would also like to know the twist of the filament (y-axis). I want to associate these two values with a range of timestamps so that I can calculate velocity.
Since we’re using a macro lens, the camera sees nothing but filament across the whole frame.
I’ve tried using dense optical flow, but the frames are so noisy that the visualized colors skip all over the place.
I’m currently trying to use sparse optical flow, storing the vector coordinates in an array and taking their average over a set of frames. The problem is that I’ve never touched Python before, and I have no idea how to achieve this …
Can you give me pointers?
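To make it concrete, this is roughly the averaging I have in mind, written as a standalone, untested sketch rather than working code (fps, mm_per_pixel and the function names are placeholders I made up, not anything from the script below):

import numpy as np

# Placeholder values I would have to fill in from the camera and the optics:
fps = 1000.0          # frames per second of the high-speed camera
mm_per_pixel = 0.01   # spatial calibration of the macro lens

def frame_displacement(prev_pts, curr_pts):
    """Mean displacement (dx, dy) in pixels between two frames.

    prev_pts and curr_pts are (N, 2) arrays of matched point positions."""
    return (curr_pts - prev_pts).mean(axis=0)

def average_velocity(displacements):
    """Average the per-frame mean displacements collected over a set of
    frames and convert them to velocities using the placeholders above."""
    dx, dy = np.mean(np.asarray(displacements), axis=0)
    vx = dx * mm_per_pixel * fps   # filament feed velocity in mm/s (x-axis)
    vy = dy * mm_per_pixel * fps   # drift along the twist axis in mm/s (y-axis)
    return vx, vy

So per processed frame I would append frame_displacement(...) to a list, and every so many frames call average_velocity(...) on that list and reset it. I don't know whether that is the right approach.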
The example script lk_track_optical_flow-test.py below shows that sparse optical flow works reasonably well for this scenario.
#!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames.
Usage
-----
lk_track.py [<video_source>]
Keys
----
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
from common import anorm2, draw_str
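# video and common are helper modules that ship with the OpenCV Python samples (samples/python)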
lk_params = dict( winSize  = (25, 25),
                  maxLevel = 3,
                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))

feature_params = dict( maxCorners = 500,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
class App:
    def __init__(self, video_src):
        self.track_len = 10
        self.detect_interval = 5
        self.tracks = []
        self.cam = video.create_capture(video_src)
        self.frame_idx = 0
        self.movement_interval = 20

    def run(self):
        while True:
            _ret, frame = self.cam.read()
            if not _ret:
                break
            frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            vis = frame.copy()
            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
                # Compute the magnitude and angle of the 2D vector
                #magnitude, angle = cv.cartToPolar(p1[..., 0], p0r[..., 1])
            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv.circle(mask, (x, y), 5, 0, -1)
                p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])
            # Block added by Jacob: calculates the mean directional movement between
            # frames, sums it up, and after every 20 frames displays the running result.
            # prww = p1
            # standard_deviation = np.std(prww)
            # draw_str(vis, (200, 40), 'movement: %d' % (standard_deviation))
            # if self.frame_idx % self.movement_interval == 0:

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv.imshow('lk_track', vis)

            ch = cv.waitKey(1)
            if ch == 27:
                break
def main():
    import sys
    try:
        video_src = sys.argv[1]
    except IndexError:
        video_src = 0

    App(video_src).run()
    print('Done')


if __name__ == '__main__':
    print(__doc__)
    main()
    cv.destroyAllWindows()
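For reference, this is how I currently picture fixing the commented-out cartToPolar line and the "movement" block marked as added by Jacob: pull the per-point displacement out of the tracker results and average it. It is untested, and mean_flow is a helper name I made up, written standalone rather than as a patch to the script:

import numpy as np
import cv2 as cv

def mean_flow(p0, p1, good):
    """Mean displacement (dx, dy) plus per-point magnitude/angle for one frame.

    p0 and p1 are the (N, 1, 2) float32 point arrays from calcOpticalFlowPyrLK
    in the script above, and good is the boolean back-tracking mask (d < 1)."""
    d = (p1 - p0).reshape(-1, 2)[good]                    # per-point displacement in pixels
    magnitude, angle = cv.cartToPolar(d[:, 0], d[:, 1])   # polar form of each vector
    return d.mean(axis=0), magnitude, angle

Inside run() I would then append the returned mean to a list created in __init__ (say self.frame_means = []), and every self.movement_interval frames show np.mean(self.frame_means, axis=0) with draw_str and clear the list, which I think is what the commented block was aiming at. Is that a sensible way to do it?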