
Tracing the outer boundary of an image object in Python

Author: szfhy    Published: 2021-07-20 17:38:33

Tags: python, image, boundary, tracing

Sharing some Python code that traces the outer boundary of a dark object in a binarized image.


#!/usr/bin/env python
# coding=utf-8

import cv2

img = cv2.imread("trace_border2.bmp")
[img_h, img_w, img_channel] = img.shape

trace = []
start_x = 0
start_y = 0

# Use the green channel as a grayscale image and binarize it.
# Note: gray is a view into img, so img is thresholded in place as well.
gray = img[:, :, 1]
for h in range(img_h):
    for w in range(img_w):
        if gray[h, w] > 128:
            gray[h, w] = 255
        else:
            gray[h, w] = 0

# Break out of nested loops in Python by raising an exception
# https://www.cnblogs.com/xiaojiayu/p/5195316.html
class getoutofloop(Exception): pass

# Scan row by row for the first black (object) pixel; it becomes the start point.
try:
    for h in range(img_h - 2):
        for w in range(img_w - 2):
            if gray[h, w] == 0:
                start_x = w
                start_y = h
                raise getoutofloop
except getoutofloop:
    pass

print("Start Point (%d %d)" % (start_x, start_y))
trace.append([start_x, start_y])

# 8-neighborhood, enumerated clockwise starting from the upper-left neighbor.
neighbor = [[-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0]]
neighbor_len = len(neighbor)

# Start from the upper-left neighbor of the current point.
# If that neighbor is also a black pixel (a boundary point):
#     rotate the search direction 90 degrees counter-clockwise (i -= 2)
# otherwise:
#     rotate the search direction 45 degrees clockwise (i += 1)
i = 0
cur_x = start_x + neighbor[i][0]
cur_y = start_y + neighbor[i][1]

is_contour_point = 0

try:
    while not ((cur_x == start_x) and (cur_y == start_y)):
        is_contour_point = 0
        while is_contour_point == 0:
            if gray[cur_y, cur_x] == 0:
                is_contour_point = 1
                trace.append([cur_x, cur_y])
                i -= 2
                if i < 0:
                    i += neighbor_len
            else:
                i += 1
                if i >= neighbor_len:
                    i -= neighbor_len
            cur_x = cur_x + neighbor[i][0]
            cur_y = cur_y + neighbor[i][1]
except Exception:
    print("Tracing left the image area")

# Draw the traced boundary step by step in red.
for i in range(len(trace) - 1):
    cv2.line(img, (trace[i][0], trace[i][1]), (trace[i + 1][0], trace[i + 1][1]), (0, 0, 255), 3)
    cv2.imshow("img", img)
    cv2.waitKey(10)

# Mark the start point with a blue rectangle.
cv2.rectangle(img, (start_x, start_y), (start_x + 20, start_y + 20), (255, 0, 0), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyWindow("img")
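
For comparison, OpenCV's built-in contour extraction usually recovers the same outer boundary. The following is a minimal sketch, assuming the same trace_border2.bmp input with a dark object on a light background; it is an alternative check, not part of the tracer above.

import cv2

img = cv2.imread("trace_border2.bmp")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# THRESH_BINARY_INV makes the dark object white, which is what findContours expects.
_, binary = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
# RETR_EXTERNAL keeps only the outer boundaries; CHAIN_APPROX_NONE keeps every boundary pixel.
# OpenCV 4.x returns two values here; OpenCV 3.x returns three.
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
cv2.imshow("findContours", img)
cv2.waitKey(0)
cv2.destroyAllWindows()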

During the search the traced boundary is drawn as a red line, as shown below:

[Figure: the traced outer boundary drawn in red, with the start point marked by a blue rectangle]

Supplementary notes: object tracking in Python with OpenCV

1. Single-object tracking


import cv2
import sys

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
print(major_ver, minor_ver, subminor_ver)

if __name__ == '__main__':
    # Create the tracker
    tracker_type = 'MIL'
    tracker = cv2.TrackerMIL_create()
    # Open the video
    video = cv2.VideoCapture("./data/1.mp4")
    # Read the first frame
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    # Define a bounding box (overridden by the interactive ROI selection below)
    bbox = (287, 23, 86, 320)
    bbox = cv2.selectROI(frame, False)
    # Initialize the tracker with the first frame
    ok = tracker.init(frame, bbox)

    while True:
        ok, frame = video.read()
        if not ok:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)
        # Calculate FPS
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding box
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Show tracker type
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Show FPS
        cv2.putText(frame, "FPS:" + str(fps), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Show result
        cv2.imshow("Tracking", frame)

        # Exit on ESC
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
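
The cv2.TrackerMIL_create() call above assumes an OpenCV build that exposes the tracker constructors directly under cv2; in newer releases several of them live under cv2.legacy instead. A rough sketch of a version-tolerant helper (the create_tracker name is only for illustration, and the tracker names follow the list used in the multi-object example below):

import cv2

def create_tracker(tracker_type="MIL"):
    # Sketch only: which constructors exist depends on the installed
    # OpenCV / opencv-contrib version.
    name = "Tracker%s_create" % tracker_type
    if hasattr(cv2, name):
        return getattr(cv2, name)()
    legacy = getattr(cv2, "legacy", None)
    if legacy is not None and hasattr(legacy, name):
        return getattr(legacy, name)()
    raise RuntimeError("No constructor found for tracker type %r" % tracker_type)

# Example: tracker = create_tracker("KCF")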

2. Multi-object tracking

When using GOTURN as the tracker, goturn.caffemodel and goturn.prototxt must be placed in the working directory, otherwise the code will not run; see https://stackoverflow.com/questions/48802603/getting-deep-learning-tracker-goturn-to-run-opencv-python for details.
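
A minimal sketch of that pre-flight check (the check itself is only an illustration; the two file names come from the note above, and cv2.TrackerGOTURN_create assumes an OpenCV build that provides the GOTURN tracker):

import os
import sys

import cv2

# goturn.prototxt and goturn.caffemodel must sit in the current working
# directory, otherwise creating the GOTURN tracker fails at runtime.
required = ["goturn.prototxt", "goturn.caffemodel"]
missing = [f for f in required if not os.path.exists(f)]
if missing:
    print("Missing GOTURN model files:", ", ".join(missing))
    sys.exit(1)

tracker = cv2.TrackerGOTURN_create()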


import cv2
import sys

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
print(major_ver, minor_ver, subminor_ver)

if __name__ == '__main__':
    # Create the multi-tracker
    # Available single-object trackers: 'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE'
    tracker_type = 'MIL'
    tracker = cv2.MultiTracker_create()
    # Create the display window
    cv2.namedWindow("Tracking")
    # Open the video
    video = cv2.VideoCapture("./data/1.mp4")
    # Read the first frame
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    # Select one bounding box per target
    box1 = cv2.selectROI("Tracking", frame)
    box2 = cv2.selectROI("Tracking", frame)
    box3 = cv2.selectROI("Tracking", frame)
    # Initialize each tracker with the first frame
    ok = tracker.add(cv2.TrackerMIL_create(), frame, box1)
    ok1 = tracker.add(cv2.TrackerMIL_create(), frame, box2)
    ok2 = tracker.add(cv2.TrackerMIL_create(), frame, box3)

    while True:
        ok, frame = video.read()
        if not ok:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Update trackers
        ok, boxes = tracker.update(frame)
        print(ok, boxes)
        # Calculate FPS
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        for box in boxes:
            # Draw bounding box
            if ok:
                p1 = (int(box[0]), int(box[1]))
                p2 = (int(box[0] + box[2]), int(box[1] + box[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            else:
                cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Show tracker type
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Show FPS
        cv2.putText(frame, "FPS:" + str(fps), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Show result
        cv2.imshow("Tracking", frame)

        # Exit on ESC
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

Source: https://blog.csdn.net/szfhy/article/details/82288148
