Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Demo #4

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cfgs/pose_res152.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
GPUS: '0'
GPUS: (0,)
DATA_DIR: ''
OUTPUT_DIR: 'output'
LOG_DIR: 'log'
Expand Down
6 changes: 3 additions & 3 deletions evaluate.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@
# pose_number is the number of keypoints we want to see for a person
# pose_threshold is the score we set to filter the keypoints whose score is small
# ground_truth='/export/home/cyh/mygithub/PoseGCN/data/annotations/val_2017/'
ground_truth='${PGPT_ROOT}/data/demodataset/annotations/'
predictions='${PGPT_ROOT}/results/demo'
results='test'
ground_truth='/PGPT/data/demodataset/annotations'
predictions='/PGPT/results/demo'
results='/PGPT/results/evaluate'
pose_number=0
pose_threshold=0.5

Expand Down
24 changes: 13 additions & 11 deletions inference/config.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,33 @@

class Config():
root = '${PGPT_ROOT}'
root = '/PGPT'

# save_dir is the location where we store the results
save_dir = root + '/results/demo'

# json_path_detection is the location where we store the detection results
json_path_detection = root + '/results/demo_detection.json'

# gt_json_path is the gound truth of the validiation, all the ground_truth are in one file
# gt_json_path is the ground truth of the validation, all the ground_truth are in one file
gt_json_path = root + '/data/demo_val.json'

# the data folder of the PoseTrack dataset
data_folder = root + '/data/demodataset'

# the path of the location where we store the video
video_path = root

video_path = save_dir
# the path of the track model
track_model = root + '/models/tracker.pth'

# the path of the pose estimation model
pose_model = root + '/models/pose_gcn.pth.tar'

# pose config file location
pose_cfg = root + '/cfgs/pose_res152.yaml'

# the path of the embedding model
embedding_model = root + '/models/embedding_model.pth'

def __init__(self):
print('Using the config class at', __file__)
20 changes: 14 additions & 6 deletions inference/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,10 +54,18 @@
config = Config()
def parseArgs():
parser = argparse.ArgumentParser(description="Evaluation of Pose Estimation and Tracking (PoseTrack)")
#arser.add_argument('--cfg', type=str, required=True) #added by alnguyen
parser.add_argument("-t", "--detection_thresh",dest = 'det_thresh',required=False, default=0.4, type= float)
parser.add_argument("-p", "--pos_thresh",dest = 'pose_thresh',required=False, default=0, type= float)
parser.add_argument("-v", "--vis_flag",dest = 'vis_flag',required=False, default=False, type= bool)
return parser.parse_args()
#parser.add_argument('opts',
#help='Modify config options using the command-line',
#default=None,
#nargs=argparse.REMAINDER) #added by alnguyen

args = parser.parse_args()

return args

class DateEncoder(json.JSONEncoder):
def default(self, obj):
Expand Down Expand Up @@ -87,24 +95,24 @@ def track_test(args, gpu_id=0):
if not os.path.exists(save_dir):
os.makedirs(save_dir)

# Load the Detection Results
# Load the Detection Results (demo_detection.json)
with open(json_path,'r') as f:
bbox_dict = json.load(f)

# Create the Tracker
tracker = Track_And_Detect(gpu_id=gpu_id, track_model=config.track_model, pose_model=config.pose_model, embedding_model=config.embedding_model)

# Load the Ground Truth to get the right video keys
# Load the Ground Truth to get the right video keys (demo_val.json)
with open(gt_json_path,'r') as f:
gt_dict = json.load(f)


video_keys = gt_dict.keys()
pbar = tqdm(range(len(video_keys)))
for video_name in video_keys:
for video_name in video_keys: #in demo_val.json
pbar.update(1)
frame_dict = bbox_dict[video_name]
video_name = video_name.replace('.json','')
#video_name = video_name.replace('.json','')
video_json = {'annolist':[]}
save_path = os.path.join(save_dir, video_name+'.json')
idx =0
Expand All @@ -124,7 +132,7 @@ def track_test(args, gpu_id=0):
if not os.path.exists(video_path):
os.makedirs(video_path)
video_store_name = video_path + '/{}.mp4'
videoWriter = cv2.VideoWriter(video_store_name.format(video_name),fourcc,10,(im_W,im_H))
videoWriter = cv2.VideoWriter(video_store_name.format(video_name+'-pgpt'),fourcc,10,(im_W,im_H))
final_list = tracker.init_tracker(frame,det_list)
else:
track_list = tracker.multi_track(frame)
Expand Down
2 changes: 1 addition & 1 deletion inference/pose_estimation_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@
'''
class PoseNet(object):
def __init__(self, gpu_id=0, model_path=None):
self.cfg_file='${PGPT_ROOT}/cfgs/pose_res152.yaml'
self.cfg_file='/PGPT/cfgs/pose_res152.yaml'
self.flag = 0

update_config(self.cfg_file)
Expand Down
44 changes: 28 additions & 16 deletions inference/skeleton_visulize.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,13 @@
import cv2
import numpy as np
from cv2_color import Color
import glob
from config import Config

# The match list from the results to the test
match_list=[13,12,14,9,8,10,7,11,6,3,2,4,1,5,0]
color = Color(flag='bgr')


def draw_limb(image, kps, color):
def draw_line(head, tail):
if head == [] or tail == []:
Expand All @@ -34,20 +35,21 @@ def draw_line(head, tail):
draw_line(kps[h], kps[t])



def demo(image_dir, result_dir, save_dir):
"""
image_dir: the location where the frames are stored
result_dir: the results in 2017PT format, each video has one file
save_dir: the location where we store the result videos
"""

json_files = os.listdir(result_dir)
#json_files = os.listdir(result_dir)
json_files = glob.glob(result_dir + "/*.json")
json_files = [json_file.split('/')[-1] for json_file in json_files]

pbar = tqdm(range(len(json_files)))
for json_name in json_files:

video_name = json_name.replace('.json','_new')
video_name = json_name.replace('.json','-pgpt')

video_folder = os.path.join(save_dir, video_name)

Expand All @@ -58,11 +60,13 @@ def demo(image_dir, result_dir, save_dir):
old_annolist = json.load(f)['annolist']
pbar.set_description('Visulizing video {}'.format(video_name))
color_list = color.get_random_color_list()
j = 0
for i,annotation in enumerate(old_annolist):
color_flag = 0
frame_name = annotation['image'][0]['name']
frame_store_path = video_folder + '/{}'.format(frame_name.split('/')[-1])
frame_path = os.path.join(image_dir,frame_name)
#annotation['image_id'] #10010010103=frame_id
frame_store_path = os.path.join(video_folder, frame_name.split('/')[-1]) #file
frame_path = os.path.join(image_dir,frame_name) #file for read
frame = cv2.imread(frame_path)
im_H, im_W, im_C = frame.shape
if i==0:
Expand All @@ -73,32 +77,40 @@ def demo(image_dir, result_dir, save_dir):
if len(anno['annopoints']) == 0:
continue
old_point_list = anno['annopoints'][0]['point']

xmin, xmax, ymin, ymax, track_id = anno['x1'][0], anno['x2'][0], anno['y1'][0], anno['y2'][0], anno['track_id'][0]
color_flag = int(track_id) % 16

kps = [[] for _ in range(15)]
for pose in old_point_list:

pose_id, pose_x, pose_y, = pose['id'][0], pose['x'][0], pose['y'][0]
kps[pose_id] = (int(pose_x), int(pose_y))
cv2.circle(frame,(int(pose_x),int(pose_y)), 3 ,color_list[color_flag], -1)
cv2.circle(frame,(int(pose_x),int(pose_y)), 3 ,color_list[color_flag], -1)
draw_limb(frame, kps, color_list[color_flag])
cv2.rectangle(frame, (int(xmin),int(ymin)), (int(xmax),int(ymax)), color_list[color_flag], 3)
cv2.putText(frame, 'id:' + str(track_id), (int(xmin),int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 1, color_list[color_flag], 2)
videoWriter.write(frame)
cv2.imwrite(frame_store_path, frame)
#videoWriter.release()
pbar.update(1)
pbar.close()


if __name__ == '__main__':
print('Visualizing the results')
image_dir = '${PGPT_ROOT}/data/demodataset/'
result_dir = '${PGPT_ROOT}/results/demo/'
save_dir = '${PGPT_ROOT}/results/render/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
demo(image_dir, result_dir, save_dir)
config = Config()
parser = argparse.ArgumentParser(description="Visualizing the results")
parser.add_argument("--image_dir", type=str, default=config.data_folder) # /PGPT/data/demodataset
parser.add_argument("--result_dir", type=str, default=config.save_dir) # /PGPT/results/demo
parser.add_argument("--save_dir", type=str, default=config.video_path) # /PGPT/results/demo
#parser.add_argument('--cfg', type=str, required=True) #added by alnguyen
#parser.add_argument('opts',
#help='Modify config options using the command-line',
#default=None,
#nargs=argparse.REMAINDER) #added by alnguyen

args = parser.parse_args()

if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
demo(args.image_dir, args.result_dir, args.save_dir)


2 changes: 1 addition & 1 deletion inference/track_and_detect_new.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from tracker import SiamFCTracker

from match import Matcher
from model.nms.nms_wrapper import nms
#from model.nms.nms_wrapper import nms

class Track_And_Detect(object):
effective_track_thresh = 0.5
Expand Down
7 changes: 4 additions & 3 deletions lib/poseval/py/eval_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,8 +227,9 @@ def process_arguments(argv):
elif len(argv)<3 or len(argv)>4:
help()

gt_file = argv[1]
pred_file = argv[2]
gt_dir = argv[1]
pred_dir = argv[2]
return gt_dir, pred_dir, mode

if not os.path.exists(gt_file):
help('Given ground truth directory does not exist!\n')
Expand Down Expand Up @@ -282,7 +283,6 @@ def load_data(argv):

return gtFramesAll, prFramesAll


def cleanupData(gtFramesAll,prFramesAll):

# remove all GT frames with empty annorects and remove corresponding entries from predictions
Expand Down Expand Up @@ -377,6 +377,7 @@ def load_data_dir(argv):
if not os.path.exists(pred_dir):
help('Given prediction directory ' + pred_dir + ' does not exist!\n')
filenames = glob.glob(gt_dir + "/*.json")

gtFramesAll = []
prFramesAll = []

Expand Down
28 changes: 0 additions & 28 deletions requirement.txt

This file was deleted.