Commit 85c8d2f: showing 18 changed files with 2,023 additions and 0 deletions.
@@ -0,0 +1,11 @@
input/*
output/*
.idea/*
__pycache__
.ipynb_checkpoints
model_data/*

*.pyc
*.swp
*.swo
*.swn
@@ -0,0 +1,41 @@
# A3: People counter

The main goal of this project is to count people on the streets, so all parameters are tuned for that task.

### Quick Start

1. Clone the repository.
2. Download the converted weights ([yolo.h5 model file with tf-1.4.0](https://drive.google.com/file/d/1uvXFacPnrSMw6ldWTyLLjGLETlEsUvcE/view?usp=sharing)) and put them into the **model_data** folder.
3. Install the requirements.
4. Specify the path to your input file and run the model from the command line (a sketch of how these flags could be parsed appears after this list):
```
python demo.py --videofile="path/to/your/videofile" --out_root_dir="path/to/output/dir"
```
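For reference, here is a minimal sketch of how the two flags above could be parsed. This is a hypothetical stub illustrating the CLI contract only, not the repository's actual `demo.py`.

```python
# Hypothetical stub showing the CLI contract of the command above;
# the real demo.py internals are not described in this README.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="A3 people counter")
    parser.add_argument("--videofile", required=True,
                        help="path to the input video file")
    parser.add_argument("--out_root_dir", required=True,
                        help="directory where results are written")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    print(args.videofile, args.out_root_dir)
```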
### Dependencies

The code is compatible with Python 3. The following dependencies are needed to run the tracker:

- NumPy
- scikit-learn (sklearn)
- OpenCV
- Pillow
- Keras

Additionally, feature generation requires TensorFlow 1.4.0.
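A quick way to verify the environment is to import each package. The module names below are the standard import names for the libraries listed above (`cv2` for OpenCV, `PIL` for Pillow); the version check only warns, it does not enforce an exact pin.

```python
# Minimal environment sanity check for the dependencies listed above.
import numpy
import sklearn
import cv2
import PIL
import keras
import tensorflow as tf

print("TensorFlow:", tf.__version__)
if not tf.__version__.startswith("1.4"):
    print("Warning: feature generation expects TensorFlow 1.4.0")
```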
### Run for other classes

Note that the code ignores every class except `person`. Change the class check if you want to run the tracker on other object types (a multi-class variant is sketched after the snippet below):
In `A3/yolo3/yolo.py`:

    if predicted_class != 'person':
        continue
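As a hypothetical illustration, the same filter generalised to a set of classes could look like the following. The class names are example COCO labels, and the loop merely stands in for the detector's per-box loop.

```python
# Hypothetical multi-class variant of the single-class filter in A3/yolo3/yolo.py.
# ALLOWED_CLASSES and the example labels are illustrative, not from the repo.
ALLOWED_CLASSES = {'person', 'bicycle', 'car'}

for predicted_class in ['person', 'car', 'dog']:  # stand-in for the detector's output classes
    if predicted_class not in ALLOWED_CLASSES:
        continue  # skip anything we are not counting
    print("keeping detection of class:", predicted_class)
```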
### Notes for future work

You can use any detector you like in place of the Keras version of YOLO to produce the bounding boxes, since the Keras implementation is slow.
The model file `model_data/mars-small128.pb` required by deep_sort has been converted for TensorFlow 1.4.0.
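For context, a frozen TensorFlow 1.x graph such as this one can be loaded with the standard TF 1.x API. The sketch below is generic and is not the feature encoder shipped with deep_sort.

```python
# Generic TF 1.x sketch for loading a frozen graph such as
# model_data/mars-small128.pb; not the deep_sort feature encoder itself.
import tensorflow as tf

with tf.gfile.GFile("model_data/mars-small128.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="net")

print([op.name for op in graph.get_operations()][:5])  # first few node names
```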
**This work is mainly based on https://github.com/Qidian213/deep_sort_yolov3. Many thanks to the author.**
Empty file.
@@ -0,0 +1,48 @@
import numpy as np


class Detection(object):
    """
    This class represents a bounding box detection in a single image.
    Parameters
    ----------
    tlwh : array_like
        Bounding box in format `(x, y, w, h)`.
    confidence : float
        Detector confidence score.
    feature : array_like
        A feature vector that describes the object contained in this image.
    Attributes
    ----------
    tlwh : ndarray
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : ndarray
        Detector confidence score.
    feature : ndarray | NoneType
        A feature vector that describes the object contained in this image.
    """

    def __init__(self, tlwh, confidence, feature):
        self.tlwh = np.asarray(tlwh, dtype=np.float)
        self.confidence = float(confidence)
        self.feature = np.asarray(feature, dtype=np.float32)

    def to_tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    def to_xyah(self):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret
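A small usage sketch of the `Detection` class above, with made-up box values. The import path follows the `deep_sort.detection.Detection` reference that appears in the docstrings further below.

```python
# Usage sketch with made-up values: a 50x100 box with top-left corner at
# (10, 20), confidence 0.9, and a dummy 128-d appearance feature.
import numpy as np
from deep_sort.detection import Detection  # path as referenced in the docstrings below

det = Detection(tlwh=[10, 20, 50, 100], confidence=0.9,
                feature=np.zeros(128, dtype=np.float32))

print(det.to_tlbr())  # [ 10.  20.  60. 120.]  -> (min x, min y, max x, max y)
print(det.to_xyah())  # [ 35.  70.   0.5 100.] -> (center x, center y, w/h, height)
```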
@@ -0,0 +1,83 @@
from __future__ import absolute_import

import numpy as np

from . import linear_assignment


def iou(bbox, candidates):
    """Compute intersection over union.
    Parameters
    ----------
    bbox : ndarray
        A bounding box in format `(top left x, top left y, width, height)`.
    candidates : ndarray
        A matrix of candidate bounding boxes (one per row) in the same format
        as `bbox`.
    Returns
    -------
    ndarray
        The intersection over union in [0, 1] between the `bbox` and each
        candidate. A higher score means a larger fraction of the `bbox` is
        occluded by the candidate.
    """
    bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
    candidates_tl = candidates[:, :2]
    candidates_br = candidates[:, :2] + candidates[:, 2:]

    tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
               np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
    br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
               np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
    wh = np.maximum(0., br - tl)

    area_intersection = wh.prod(axis=1)
    area_bbox = bbox[2:].prod()
    area_candidates = candidates[:, 2:].prod(axis=1)
    return area_intersection / (area_bbox + area_candidates - area_intersection)


def iou_cost(tracks, detections, track_indices=None,
             detection_indices=None):
    """An intersection over union distance metric.
    Parameters
    ----------
    tracks : List[deep_sort.track.Track]
        A list of tracks.
    detections : List[deep_sort.detection.Detection]
        A list of detections.
    track_indices : Optional[List[int]]
        A list of indices to tracks that should be matched. Defaults to
        all `tracks`.
    detection_indices : Optional[List[int]]
        A list of indices to detections that should be matched. Defaults
        to all `detections`.
    Returns
    -------
    ndarray
        Returns a cost matrix of shape
        len(track_indices), len(detection_indices) where entry (i, j) is
        `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
    for row, track_idx in enumerate(track_indices):
        if tracks[track_idx].time_since_update > 1:
            cost_matrix[row, :] = linear_assignment.INFTY_COST
            continue

        bbox = tracks[track_idx].to_tlwh()
        candidates = np.asarray([detections[i].tlwh for i in detection_indices])
        cost_matrix[row, :] = 1. - iou(bbox, candidates)

    return cost_matrix
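A worked example of the `iou` helper above, with made-up boxes; it assumes the function defined above is already in scope.

```python
# Worked example for iou() with made-up boxes; assumes iou() from the
# module above is defined in the current scope.
import numpy as np

bbox = np.array([0., 0., 10., 10.])           # query box: (top left x, top left y, w, h)
candidates = np.array([[5., 5., 10., 10.],    # overlaps the query in a 5x5 patch
                       [0., 0., 10., 10.]])   # identical to the query box

print(iou(bbox, candidates))  # approximately [0.1429 1.0]
```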