
Commit 994afb1

lint(): run pylint
1 parent 66d7ebe commit 994afb1

File tree

6 files changed

+171 -139 lines changed


LICENSE.txt

+1 -1
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2020 Amit Moryossef
+Copyright (c) 2021 Amit Moryossef
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

README.md

+1 -1
@@ -8,7 +8,7 @@ The format supports any type of poses, arbitrary number of people, and arbitrary
 
 The main idea is having a `header` with instructions on how many points exists, where, and how to connect them.
 
-The binary spec can be found in [format/spec.md](format/spec.md).
+The binary spec can be found in [lib/specs/v0.1.md](lib/specs/v0.1.md).
 
 ### Python Usage
 ```bash
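For context on the `header` idea mentioned above, a minimal reading sketch (hypothetical file name `sample.pose`; it only relies on `Pose.read`, `header.components`, and `body.data`, all visible in the pose_format/pose.py diff below):

from pose_format.pose import Pose

# Parse the header and body from the raw bytes of a .pose file
with open("sample.pose", "rb") as f:
    pose = Pose.read(f.read())

# The header lists the components, their named points, and how to connect them
for component in pose.header.components:
    print(component.name, len(component.points), "points")

# The body carries the actual point data (frames x people x points x dims)
print(pose.body.data.shape)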

pose_format/pose.py

+128 -116
@@ -11,123 +11,135 @@
 
 
 class Pose:
-    """File IO for '.pose' file format, including the header and body"""
-
-    def __init__(self, header: PoseHeader, body: PoseBody):
-        """
-        :param header: PoseHeader
-        :param body: PoseBody
-        """
-        self.header = header
-        self.body = body
-
-    @staticmethod
-    def read(buffer: bytes, pose_body: PoseBody = NumPyPoseBody):
-        reader = BufferReader(buffer)
-        header = PoseHeader.read(reader)
-        body = pose_body.read(header, reader)
-
-        return Pose(header, body)
-
-    def write(self, buffer: BinaryIO):
-        self.header.write(buffer)
-        self.body.write(self.header.version, buffer)
-
-    def focus(self):
-        """
-        Gets the pose to start at (0,0) and have dimensions as big as needed
-        """
-        mins = ma.min(self.body.data, axis=(0, 1, 2))
-        maxs = ma.max(self.body.data, axis=(0, 1, 2))
-
-        if np.count_nonzero(mins) > 0:  # Only translate if there is a number to translate by
-            self.body.data = ma.subtract(self.body.data, mins)
-
-        dimensions = (maxs - mins).tolist()
-        self.header.dimensions = PoseHeaderDimensions(*dimensions)
-
-    def normalize(self, info: PoseNormalizationInfo, scale_factor: float = 1):
-        """
-        Normalize the point to a fixed distance between two points
-        """
-        mask = self.body.data.mask
-        transposed = self.body.zero_filled().points_perspective()
-
-        p1s = transposed[info.p1]
-        p2s = transposed[info.p2]
-
-        if transposed.shape[1] == 0:
-            p1s = p1s[0]
-            p2s = p2s[0]
-        else:
-            p1s = ma.concatenate(p1s)
-            p2s = ma.concatenate(p2s)
-
-        # Move all points so center is (0,0)
-        center = np.mean((p2s + p1s) / 2, axis=0)
-        self.body.data -= center
-
-        mean_distance = np.mean(distance_batch(p1s, p2s))
-
-        scale = scale_factor / mean_distance  # scale all points to dist/scale
-
-        if round(scale, 5) != 1:
-            self.body.data = ma.multiply(self.body.data, scale)
-
-        self.body.data = ma.array(self.body.data, mask=mask)
-
-        return self
-
-    def frame_dropout(self, dropout_std=0.1):
-        body, selected_indexes = self.body.frame_dropout(dropout_std=dropout_std)
-        return Pose(header=self.header, body=body), selected_indexes
-
-    def get_components(self, components: List[str]):
-        indexes = []
-        new_components = []
-
-        idx = 0
-        for component in self.header.components:
-            if component.name in components:
-                new_components.append(component)
-                indexes += list(range(idx, len(component.points) + idx))
-            idx += len(component.points)
-
-        new_header = PoseHeader(self.header.version, self.header.dimensions, new_components)
-        new_body = self.body.get_points(indexes)
-
-        return Pose(header=new_header, body=new_body)
-
-    def bbox(self):
-        body = self.body.bbox(self.header)
-        header = self.header.bbox()
-        return Pose(header=header, body=body)
-
-    pass_through_methods = {
-        "augment2d",  # Augment 2D points
-        "interpolate",  # Interpolate missing pose points
-        "torch",  # Convert body to torch
-        "tensorflow",  # Convert body to tensorflow
-        "slice_step",  # Step through the data
-    }
-
-    def __getattr__(self, attr):
-        if attr not in Pose.pass_through_methods:
-            raise AttributeError("Attribute '%s' doesn't exist on class Pose" % attr)
-
-        def func(*args, **kwargs):
-            prop = getattr(self.body, attr)
-            body_res = prop(*args, **kwargs)
+    """File IO for '.pose' file format, including the header and body"""
 
-            if isinstance(body_res, PoseBody):
-                header = self.header
-                if hasattr(header, attr):
-                    header_res = getattr(header, attr)(*args, **kwargs)
-                    if isinstance(header_res, PoseHeader):
-                        header = header_res
+    def __init__(self, header: PoseHeader, body: PoseBody):
+        """
+        :param header: PoseHeader
+        :param body: PoseBody
+        """
+        self.header = header
+        self.body = body
 
-                return Pose(header, body_res)
+    @staticmethod
+    def read(buffer: bytes, pose_body: PoseBody = NumPyPoseBody):
+        reader = BufferReader(buffer)
+        header = PoseHeader.read(reader)
+        body = pose_body.read(header, reader)
 
-            return body_res
+        return Pose(header, body)
 
-        return func
+    def write(self, buffer: BinaryIO):
+        self.header.write(buffer)
+        self.body.write(self.header.version, buffer)
+
+    def focus(self):
+        """
+        Gets the pose to start at (0,0) and have dimensions as big as needed
+        """
+        mins = ma.min(self.body.data, axis=(0, 1, 2))
+        maxs = ma.max(self.body.data, axis=(0, 1, 2))
+
+        if np.count_nonzero(mins) > 0:  # Only translate if there is a number to translate by
+            self.body.data = ma.subtract(self.body.data, mins)
+
+        dimensions = (maxs - mins).tolist()
+        self.header.dimensions = PoseHeaderDimensions(*dimensions)
+
+    def normalize(self, info: PoseNormalizationInfo, scale_factor: float = 1):
+        """
+        Normalize the point to a fixed distance between two points
+        """
+        mask = self.body.data.mask
+        transposed = self.body.zero_filled().points_perspective()
+
+        p1s = transposed[info.p1]
+        p2s = transposed[info.p2]
+
+        if transposed.shape[1] == 0:
+            p1s = p1s[0]
+            p2s = p2s[0]
+        else:
+            p1s = ma.concatenate(p1s)
+            p2s = ma.concatenate(p2s)
+
+        # Move all points so center is (0,0)
+        center = np.mean((p2s + p1s) / 2, axis=0)
+        self.body.data -= center
+
+        mean_distance = np.mean(distance_batch(p1s, p2s))
+
+        scale = scale_factor / mean_distance  # scale all points to dist/scale
+
+        if round(scale, 5) != 1:
+            self.body.data = ma.multiply(self.body.data, scale)
+
+        self.body.data = ma.array(self.body.data, mask=mask)
+
+        return self
+
+    def normalize_distribution(self, mu=None, std=None):
+        mu = mu if mu is not None else ma.mean(self.body.data, axis=(0, 1))
+        std = std if std is not None else ma.std(self.body.data, axis=(0, 1))
+
+        self.body.data = (self.body.data - mu) / std
+
+        return mu, std
+
+    def unnormalize_distribution(self, mu, std):
+        self.body.data = (self.body.data * std) + mu
+
+    def frame_dropout(self, dropout_std=0.1):
+        body, selected_indexes = self.body.frame_dropout(dropout_std=dropout_std)
+        return Pose(header=self.header, body=body), selected_indexes
+
+    def get_components(self, components: List[str]):
+        indexes = []
+        new_components = []
+
+        idx = 0
+        for component in self.header.components:
+            if component.name in components:
+                new_components.append(component)
+                indexes += list(range(idx, len(component.points) + idx))
+            idx += len(component.points)
+
+        new_header = PoseHeader(self.header.version, self.header.dimensions, new_components)
+        new_body = self.body.get_points(indexes)
+
+        return Pose(header=new_header, body=new_body)
+
+    def bbox(self):
+        body = self.body.bbox(self.header)
+        header = self.header.bbox()
+        return Pose(header=header, body=body)
+
+    pass_through_methods = {
+        "augment2d",  # Augment 2D points
+        "flip",  # Flip pose on axis
+        "interpolate",  # Interpolate missing pose points
+        "torch",  # Convert body to torch
+        "tensorflow",  # Convert body to tensorflow
+        "slice_step",  # Step through the data
+    }
+
+    def __getattr__(self, attr):
+        if attr not in Pose.pass_through_methods:
+            raise AttributeError("Attribute '%s' doesn't exist on class Pose" % attr)
+
+        def func(*args, **kwargs):
+            prop = getattr(self.body, attr)
+            body_res = prop(*args, **kwargs)
+
+            if isinstance(body_res, PoseBody):
+                header = self.header
+                if hasattr(header, attr):
+                    header_res = getattr(header, attr)(*args, **kwargs)
+                    if isinstance(header_res, PoseHeader):
+                        header = header_res
+
+                return Pose(header, body_res)
+
+            return body_res
+
+        return func
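A short usage sketch of the class above (continuing the `pose` object from the earlier reading sketch; only methods visible in this diff are used): `normalize_distribution` standardizes the masked data and returns the statistics needed to undo it, while names listed in `pass_through_methods` (now including "flip") are forwarded to the body by `__getattr__` and re-wrapped in a new `Pose` when a `PoseBody` comes back.

# Standardize the point distribution; keep mu/std so it can be undone
mu, std = pose.normalize_distribution()
pose.unnormalize_distribution(mu, std)

# Pass-through call: __getattr__ forwards it to pose.body, and because the
# result is a PoseBody it is wrapped in a new Pose
augmented = pose.augment2d(rotation_std=0.2, shear_std=0.2, scale_std=0.2)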

pose_format/pose_body.py

+6 -2
@@ -129,10 +129,14 @@ def augment2d(self, rotation_std=0.2, shear_std=0.2, scale_std=0.2):
         # Based on https://en.wikipedia.org/wiki/Scaling_(geometry)
         if scale_std > 0:
             scale_matrix = np.eye(2)
-            scale_matrix[0][0] += np.random.normal(loc=0, scale=scale_std, size=1)[0]
+            scale_matrix[1][1] += np.random.normal(loc=0, scale=scale_std, size=1)[0]
             matrix = np.dot(matrix, scale_matrix)
 
-        return self.matmul(matrix.astype(dtype=np.float32))
+        # Cast to matrix the correct size
+        dim_matrix = np.eye(self.data.shape[-1])
+        dim_matrix[0:2,0:2] = matrix
+
+        return self.matmul(dim_matrix.astype(dtype=np.float32))
 
     def zero_filled(self) -> __qualname__:
         raise NotImplementedError("'zero_filled' not implemented on '%s'" % self.__class__)
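The added lines embed the 2x2 augmentation matrix in the top-left block of an identity matrix sized to the data's last dimension, so data with more than two coordinates can be multiplied without touching the extra axes. A standalone NumPy illustration of that embedding (not the library code itself):

import numpy as np

matrix = np.array([[1.1, 0.0],
                   [0.0, 0.9]])        # 2x2 augmentation: scale x by 1.1, y by 0.9

point = np.array([2.0, 4.0, 7.0])      # a single 3D point (x, y, z)

dim_matrix = np.eye(point.shape[-1])   # 3x3 identity
dim_matrix[0:2, 0:2] = matrix          # embed the 2x2 transform in the top-left block

print(point @ dim_matrix)              # [2.2, 3.6, 7.0] -- z is unchanged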

pose_format/pose_visualizer.py

+30 -17
@@ -1,4 +1,5 @@
 import itertools
+from functools import lru_cache
 from typing import Tuple, Iterator
 
 import cv2
@@ -15,39 +16,42 @@ def __init__(self, pose: Pose):
         self.pose = pose
 
     def _draw_frame(self, frame: ma.MaskedArray, frame_confidence: np.ndarray, img) -> np.ndarray:
-        avg_color = np.mean(img, axis=(0, 1))
-        # print("avg_color", avg_color)
+        background_color = img[0][0]  # Estimation of background color for opacity. `mean` is slow
+
+        thickness = round(math.sqrt(img.shape[0] * img.shape[1]) / 300)
+        radius = round(thickness/2)
 
         for person, person_confidence in zip(frame, frame_confidence):
             c = person_confidence.tolist()
             idx = 0
             for component in self.pose.header.components:
                 colors = [np.array(c[::-1]) for c in component.colors]
 
+                @lru_cache(maxsize=None)
                 def _point_color(p_i: int):
                     opacity = c[p_i + idx]
-                    np_color = colors[p_i % len(component.colors)] * opacity + (1 - opacity) * avg_color
+                    np_color = colors[p_i % len(component.colors)] * opacity + (1 - opacity) * background_color
                     return tuple([int(c) for c in np_color])
 
                 # Draw Points
                 for i in range(len(component.points)):
                     if c[i + idx] > 0:
-                        cv2.circle(img=img, center=tuple(person[i + idx]), radius=3,
+                        cv2.circle(img=img, center=tuple(person[i + idx][:2]), radius=radius,
                                    color=_point_color(i), thickness=-1)
 
                 if self.pose.header.is_bbox:
                     point1 = tuple(person[0 + idx].tolist())
                     point2 = tuple(person[1 + idx].tolist())
                     color = tuple(np.mean([_point_color(0), _point_color(1)], axis=0))
 
-                    cv2.rectangle(img=img, pt1=point1, pt2=point2, color=color, thickness=2)
+                    cv2.rectangle(img=img, pt1=point1, pt2=point2, color=color, thickness=thickness)
                 else:
                     int_person = person.astype(np.int32)
                     # Draw Limbs
                     for (p1, p2) in component.limbs:
                         if c[p1 + idx] > 0 and c[p2 + idx] > 0:
-                            point1 = tuple(int_person[p1 + idx].tolist())
-                            point2 = tuple(int_person[p2 + idx].tolist())
+                            point1 = tuple(int_person[p1 + idx].tolist()[:2])
+                            point2 = tuple(int_person[p2 + idx].tolist()[:2])
 
                             length = ((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) ** 0.5
 
@@ -56,7 +60,7 @@ def _point_color(p_i: int):
                             deg = math.degrees(math.atan2(point1[1] - point2[1], point1[0] - point2[0]))
                             polygon = cv2.ellipse2Poly(
                                 (int((point1[0] + point2[0]) / 2), int((point1[1] + point2[1]) / 2)),
-                                (int(length / 2), 3),
+                                (int(length / 2), thickness),
                                 int(deg),
                                 0, 360, 1)
                             cv2.fillConvexPoly(img=img, points=polygon, color=color)
@@ -67,28 +71,37 @@ def _point_color(p_i: int):
 
     def draw(self, background_color: Tuple[int, int, int] = (255, 255, 255), max_frames: int = None):
         int_data = np.array(np.around(self.pose.body.data.data), dtype="int32")
+        background = np.full((self.pose.header.dimensions.height, self.pose.header.dimensions.width, 3),
+                             fill_value=background_color, dtype="uint8")
         for frame, confidence in itertools.islice(zip(int_data, self.pose.body.confidence), max_frames):
-            background = np.full((self.pose.header.dimensions.height, self.pose.header.dimensions.width, 3),
-                                 fill_value=background_color,
-                                 dtype="uint8")
-            yield self._draw_frame(frame, confidence, img=background)
+            yield self._draw_frame(frame, confidence, img=background.copy())
 
-    def draw_on_video(self, background_video: str, max_frames: int = None, blur=False):
+    def draw_on_video(self, background_video, max_frames: int = None, blur=False):
         int_data = np.array(np.around(self.pose.body.data.data), dtype="int32")
 
         if max_frames is None:
             max_frames = len(int_data)
 
-        cap = cv2.VideoCapture(background_video)
-        for frame, confidence in itertools.islice(zip(int_data, self.pose.body.confidence), max_frames):
-            _, background = cap.read()
+        def get_frames(video_path):
+
+            cap = cv2.VideoCapture(video_path)
+            while True:
+                ret, vf = cap.read()
+                if not ret:
+                    break
+                yield vf
+            cap.release()
+
+        if isinstance(background_video, str):
+            background_video = iter(get_frames(background_video))
+
+        for frame, confidence, background in itertools.islice(zip(int_data, self.pose.body.confidence, background_video), max_frames):
             background = cv2.resize(background, (self.pose.header.dimensions.width, self.pose.header.dimensions.height))
 
             if blur:
                 background = cv2.blur(background, (20, 20))
 
             yield self._draw_frame(frame, confidence, background)
-        cap.release()
 
     def save_frame(self, f_name: str, frame: np.ndarray):
         cv2.imwrite(f_name, frame)
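With this change, `draw_on_video` accepts either a video path (wrapped by the internal `get_frames` generator) or any iterable of frames, since the loop only zips over `background_video`. A usage sketch (hypothetical file names, and assuming the visualizer class in this file is `PoseVisualizer`):

import cv2
from pose_format.pose import Pose
from pose_format.pose_visualizer import PoseVisualizer

with open("sample.pose", "rb") as f:
    visualizer = PoseVisualizer(Pose.read(f.read()))

# 1) Let the visualizer open the video itself, saving the overlaid frames
for i, frame in enumerate(visualizer.draw_on_video("sample.mp4", max_frames=10)):
    visualizer.save_frame("frame_%d.png" % i, frame)

# 2) Or supply frames from any source, e.g. a hand-rolled reader
def frames(path):
    cap = cv2.VideoCapture(path)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        yield frame
    cap.release()

for frame in visualizer.draw_on_video(frames("sample.mp4"), max_frames=10):
    pass  # e.g. hand each frame to a video writer here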
