Diffstat (limited to 'cv/seg')
-rw-r--r--  cv/seg/human_seg.py              33
-rw-r--r--  cv/seg/img2video.py              30
-rw-r--r--  cv/seg/utils.py                 128
-rw-r--r--  cv/seg/video_seg/processing.py   58
-rwxr-xr-x  cv/seg/video_seg/vtest.avi      bin 0 -> 8131690 bytes
5 files changed, 249 insertions, 0 deletions
diff --git a/cv/seg/human_seg.py b/cv/seg/human_seg.py
new file mode 100644
index 0000000..106b5c7
--- /dev/null
+++ b/cv/seg/human_seg.py
@@ -0,0 +1,33 @@
+# 1. Import modules
+import os
+import paddlehub as hub
+import numpy as np
+from collections import Counter
+import cv2
+import matplotlib.pyplot as plt
+
+
+path = './data/'
+files = os.listdir(path)
+imgs = [os.path.join(path, f) for f in files]
+
+# Preview the first image; cv2.waitKey is required for the window to render.
+cv2.imshow('preview', cv2.imread(imgs[0]))
+cv2.waitKey(0)
+cv2.destroyAllWindows()
+
+
+
+
+# 2. Load the model
+humanseg = hub.Module(name='deeplabv3p_xception65_humanseg')
+
+
+
+# 3. Run segmentation; visualization=True saves the matted images to disk.
+results = humanseg.segmentation(data={'image': imgs}, visualization=True)
+
+# Inspect the per-pixel value distribution of the first result.
+unique, counts = np.unique(results[0]['data'], return_counts=True)
+print(dict(zip(unique, counts)))
+
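+# Hedged sketch (not part of the original script): composite the person onto a
+# white background. This assumes results[0]['data'] is a per-pixel map aligned
+# with the first input image, with values > 0 marking the person; adjust if your
+# PaddleHub version returns a different structure.
+orig = cv2.imread(imgs[0]).astype(np.float32)
+mask = (np.asarray(results[0]['data']) > 0).astype(np.float32)[:, :, np.newaxis]
+composite = orig * mask + 255.0 * (1.0 - mask)
+cv2.imwrite('composite.jpg', composite.astype(np.uint8))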
diff --git a/cv/seg/img2video.py b/cv/seg/img2video.py
new file mode 100644
index 0000000..c824c9c
--- /dev/null
+++ b/cv/seg/img2video.py
@@ -0,0 +1,30 @@
+import cv2
+import os
+from tqdm import tqdm
+
+image_folder = './video_seg/v2j'
+video_name = 'video.avi'
+
+# Collect and numerically sort the frames, which are named '<total>-<index>.jpg'.
+images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
+images.sort(key=lambda s: int(s.split('.')[0].split('-')[1]))
+
+frame = cv2.imread(os.path.join(image_folder, images[0]))
+height, width, layers = frame.shape
+
+# fourcc=0 leaves the codec unset (raw output on most builds); 10 is the frame rate.
+video = cv2.VideoWriter(video_name, 0, 10, (width, height))
+for image in tqdm(images):
+ video.write(cv2.imread(os.path.join(image_folder, image)))
+
+cv2.destroyAllWindows()
+video.release()
+
+
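+# Hedged alternative sketch (assumes your OpenCV build supports the XVID codec):
+# writing with an explicit fourcc is usually more portable than fourcc=0 above.
+# The function name and output filename are illustrative, not part of this repo.
+def write_video_xvid(frame_paths, out_path='video_xvid.avi', fps=10):
+    first = cv2.imread(frame_paths[0])
+    h, w = first.shape[:2]
+    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))
+    for p in frame_paths:
+        writer.write(cv2.imread(p))
+    writer.release()
+
+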
+# import os
+# import moviepy.video.io.ImageSequenceClip
+# image_folder = './video_seg/v2j'
+# fps = 10
+#
+# image_files = [os.path.join(image_folder,img)
+# for img in os.listdir(image_folder)
+# if img.endswith(".jpg")]
+# clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(image_files, fps=fps)
+# clip.write_videofile('my_video.mp4')
\ No newline at end of file
diff --git a/cv/seg/utils.py b/cv/seg/utils.py
new file mode 100644
index 0000000..0329c84
--- /dev/null
+++ b/cv/seg/utils.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+
+"""
+Function for interactively selecting part of an array displayed as an image with matplotlib.
+"""
+
+import matplotlib.pyplot as plt
+from matplotlib import is_interactive
+from matplotlib.path import Path
+from matplotlib.widgets import LassoSelector, RectangleSelector
+import numpy as np
+
+
+def path_bbox(p):
+ """
+    Return rectangular bounding box of given path.
+
+    Parameters
+    ----------
+    p : array_like
+        Array of vertices with shape Nx2.
+
+    Returns
+    -------
+    bbox : array_like
+        Array of bounding box vertices with shape 4x2.
+ """
+
+ assert p.ndim == 2
+ assert p.shape[1] == 2
+
+ ix_min = p[:, 0].argmin()
+ ix_max = p[:, 0].argmax()
+ iy_min = p[:, 1].argmin()
+ iy_max = p[:, 1].argmax()
+
+ return np.array([[p[ix_min, 0], p[iy_min, 1]],
+ [p[ix_min, 0], p[iy_max, 1]],
+ [p[ix_max, 0], p[iy_max, 1]],
+ [p[ix_max, 0], p[iy_min, 1]]])
+
+
+def imshow_select(data, selector='lasso', bbox=False):
+ """
+ Display array as image with region selector.
+
+ Parameters
+ ----------
+ data : array_like
+ Array to display.
+ selector : str
+ Region selector. For `lasso`, use `LassoSelector`; for `rectangle`,
+ use `RectangleSelector`.
+    bbox : bool
+        If True, only return the array within the rectangular bounding box of the
+        selected region. Otherwise, return an array with the same dimensions as
+        `data` such that the selected region contains the corresponding values
+        from `data` and the remainder contains 0.
+
+    Returns
+    -------
+    region : array_like
+        Data for the selected region.
+    mask : array_like
+        Boolean mask with the same shape as `data` for selecting the returned
+        region from `data`.
+    """
+
+ interactive = is_interactive()
+ if not interactive:
+ plt.ion()
+ fig = plt.figure()
+ ax = fig.gca()
+ ax.imshow(data)
+
+ x, y = np.meshgrid(np.arange(data.shape[1], dtype=int),
+ np.arange(data.shape[0], dtype=int))
+ pix = np.vstack((x.flatten(), y.flatten())).T
+
+ # Store data in dict value to permit overwriting by nested
+ # functions in Python 2.7:
+ selected = {}
+ selected['data'] = np.zeros_like(data)
+ selected['mask'] = np.tile(False, data.shape)
+
+ def _onselect_lasso(verts):
+ verts = np.array(verts)
+ p = Path(verts)
+ ind = p.contains_points(pix, radius=1)
+ selected['data'].flat[ind] = data.flat[ind]
+ selected['mask'].flat[ind] = True
+ if bbox:
+ b = path_bbox(verts)
+ selected['data'] = selected['data'][int(min(b[:, 1])):int(max(b[:, 1])),
+ int(min(b[:, 0])):int(max(b[:, 0]))]
+
+ def _onselect_rectangle(start, end):
+ verts = np.array([[start.xdata, start.ydata],
+ [start.xdata, end.ydata],
+ [end.xdata, end.ydata],
+ [end.xdata, start.ydata]], int)
+ p = Path(verts)
+ ind = p.contains_points(pix, radius=1)
+ selected['data'].flat[ind] = data.flat[ind]
+ selected['mask'].flat[ind] = True
+ if bbox:
+ b = path_bbox(verts)
+ selected['data'] = selected['data'][min(b[:, 1]):max(b[:, 1]),
+ min(b[:, 0]):max(b[:, 0])]
+
+ name_to_selector = {'lasso': LassoSelector,
+ 'rectangle': RectangleSelector}
+ selector = name_to_selector[selector]
+ onselect_dict = {LassoSelector: _onselect_lasso,
+ RectangleSelector: _onselect_rectangle}
+ kwargs_dict = {LassoSelector: {},
+ RectangleSelector: {'interactive': True}}
+
+    widget = selector(ax, onselect_dict[selector], **kwargs_dict[selector])
+    input('Press Enter when done')
+    widget.disconnect_events()
+ if not interactive:
+ plt.ioff()
+ return selected['data'], selected['mask']
+
+
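+# Hedged usage sketch (assumes an interactive matplotlib backend): the 'rectangle'
+# selector returns the same (region, mask) pair; with bbox=False the region keeps
+# the full image shape and is zero outside the selection, so it equals
+# np.where(mask, data, 0). The function name below is illustrative only.
+def rectangle_demo(data):
+    region, mask = imshow_select(data, 'rectangle', bbox=False)
+    return np.where(mask, data, 0), mask
+
+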
+if __name__ == '__main__':
+ from skimage.data import coins
+
+ data = coins()
+ selected, mask = imshow_select(data, 'lasso', True)
+ plt.imsave('selected.png', selected)
+    plt.imsave('mask.png', mask)
\ No newline at end of file
diff --git a/cv/seg/video_seg/processing.py b/cv/seg/video_seg/processing.py
new file mode 100644
index 0000000..eeaa6bb
--- /dev/null
+++ b/cv/seg/video_seg/processing.py
@@ -0,0 +1,58 @@
+import paddlehub as hub
+from PIL import Image
+import os
+import cv2
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+
+
+def video2jpg(video_file, output_path):
+    '''
+    Save every frame of video_file as a JPEG image in the output_path folder.
+    '''
+    os.makedirs(output_path, exist_ok=True)  # create the output folder if needed
+
+    # Open the video file and query its total frame count
+    cap = cv2.VideoCapture(video_file)
+    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ num = 0
+ while True:
+ ret, frame = cap.read()
+ if ret:
+            # Name frames '<total_frames>-<index>.jpg' so img2video.py can sort them.
+            cv2.imwrite('{}/{}-{}.jpg'.format(output_path, n_frames, num), frame)
+            num += 1
+        else:
+            break
+    cap.release()  # close the video
+
+
+def humanseg(images):
+    # Load the model
+    module = hub.Module(name="deeplabv3p_xception65_humanseg")
+    # Run segmentation (matting); visualization=True saves the results to disk.
+    module.segmentation(data={"image": images}, visualization=True)
+
+ # for i, img in enumerate(images):
+ # print(i, img)
+ # result = module.segmentation(data={"image": [img]}, visualization=True)
+
+
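+# Hedged sketch (assumption: with visualization=True the module writes RGBA PNGs to
+# a 'humanseg_output' folder; adjust the paths if your PaddleHub version differs).
+# Flattens those PNGs onto a white background so img2video.py can stitch them,
+# e.g. flatten_outputs('humanseg_output', 'flattened').
+def flatten_outputs(in_dir='humanseg_output', out_dir='flattened'):
+    os.makedirs(out_dir, exist_ok=True)
+    for name in os.listdir(in_dir):
+        rgba = Image.open(os.path.join(in_dir, name)).convert('RGBA')
+        background = Image.new('RGBA', rgba.size, (255, 255, 255, 255))
+        flat = Image.alpha_composite(background, rgba).convert('RGB')
+        flat.save(os.path.join(out_dir, os.path.splitext(name)[0] + '.jpg'))
+
+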
+def file_list(listdir):
+ im_list = []
+ imgs = os.listdir(listdir)
+ for img in imgs:
+ im_list.append(os.path.join(listdir, img))
+ return im_list
+
+
+if __name__ == '__main__':
+ # video2jpg('vtest.avi', 'v2j')
+
+ img_list = file_list('./v2j')
+ humanseg(img_list)
diff --git a/cv/seg/video_seg/vtest.avi b/cv/seg/video_seg/vtest.avi
new file mode 100755
index 0000000..965ab12
--- /dev/null
+++ b/cv/seg/video_seg/vtest.avi
Binary files differ