List of commits:
Subject Hash Author Date (UTC)
predict on frame from video ec39a97372a15ad75811fdb3417d2e48078a30b2 Thai Thien 2020-12-18 15:40:36
fix normalize in data loader 56a64c14dc62c96aba532d233ff2782414738d05 Thai Thien 2020-12-17 15:36:22
change gpu 5 46d32b3fc05909b61f56389b01c2e50e52937d9b Thai Thien 2020-12-16 16:40:47
eval_dccnn_bike_video_VID_20201204_133931_404_model_adamw1_bigtail13i_t4_bike20 95bb636a0b544b7a72fc4c65acb47472da468766 Thai Thien 2020-12-16 16:35:14
eval_dccnn_bike_video_VID_20201204_133931_404_model_adamw1_bigtail13i_t3_bike20 27740a0e21ca42490f5cfe516b2453f7be3a2483 Thai Thien 2020-12-16 16:17:12
batch 2 85433ffd51070572d7e743a2b4ef53f340b658d4 Thai Thien 2020-12-15 16:57:56
reduce batch size 3 d2f145009c06f5e6e6e1e0270878ae275b611c2f Thai Thien 2020-12-15 16:55:46
t3 and t4 70e09414713ea22800d8e87a0dd984f18406c375 Thai Thien 2020-12-15 16:50:32
prepare bike 20s q100 556a4d8f695ca154a5db42b363a276d7ab233434 Thai Thien 2020-12-15 16:46:08
VID_20201204_133931_404 8264dcb4d5389ed8ef2920eb3419cfc505b6a09c Thai Thien 2020-12-13 11:31:10
add VID_20201204_133931_404 a36ba35ca7c54dcf552d17a08fa8c87720ce1c69 Thai Thien 2020-12-13 11:30:24
print total length on console 6a956d93489926e99909d234bf4a7b13d39aad56 Thai Thien 2020-12-13 11:24:56
fix input of save_density_map dc2f014fdd570909b811ac77629f0c8c0ca156da Thai Thien 2020-12-13 11:21:37
detach cpu for save_density_map f41362501a0bc3578de0a48e6cbe97966a66677c Thai Thien 2020-12-13 11:19:45
predict_video_server pred.detach().cpu().numpy() 3c23cd51890f8b791be91ad8eb74bdba9ace905a Thai Thien 2020-12-13 11:14:51
remove stuff 212a4a703ae301fb90a0f292d8ccf19a1026e071 Thai Thien 2020-12-13 11:10:28
lot of code 6b7a87a75171b0ec8ffe85960a38dd68e787e8f7 Thai Thien 2020-12-13 11:06:50
WIP a6531891669668e489f67e90e862605ccb234911 Thai Thien 2020-12-12 16:02:50
video dataset 59decbaabe7942bd19934050a57867ec485ba1f7 Thai Thien 2020-12-12 11:03:11
generate density map f233f78f8c4fe17df088338d1b03e07f5a575896 Thai Thien 2020-12-12 03:44:15
Commit ec39a97372a15ad75811fdb3417d2e48078a30b2 - predict on frame from video
Author: Thai Thien
Author date (UTC): 2020-12-18 15:40
Committer name: Thai Thien
Committer date (UTC): 2020-12-18 15:40
Parent(s): 56a64c14dc62c96aba532d233ff2782414738d05
Signing key:
Tree: e459a481738b58b8c1dc23f4bd2f111dba92b1fa
File Lines added Lines deleted
data_flow.py 14 5
dataset_script/generate_frame_from_video.py 7 7
predict_image.py 37 9
predict_script/generate_video/bike_video_frame_404_dccnn_t4.sh 7 4
File data_flow.py changed (mode: 100644) (index 679d21c..8ddefa0)
@@ -1674,7 +1674,6 @@ class PredictVideoDataset(Dataset):
         """
         v, a, info = read_video(video_path, pts_unit='sec')
 
-
         self.video_tensor = v
         self.nSamples = self.video_tensor.shape[0]
 
@@ -1699,13 +1698,23 @@ class PredictVideoDataset(Dataset):
         return img, info
 
 def get_predict_video_dataloader(video_path, visualize_mode = False, batch_size = 1):
-    if not visualize_mode:
+    if visualize_mode:
         transformer = transforms.Compose([
-            transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225]),
+            transforms.ToTensor()
         ])
     else:
-        transformer = None
+        transformer = transforms.Compose([
+            transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                                        std=[0.229, 0.224, 0.225]),
+        ])
+
+    # if not visualize_mode:
+    #     transformer = transforms.Compose([
+    #         transforms.Normalize(mean=[0.485, 0.456, 0.406],
+    #                              std=[0.229, 0.224, 0.225]),
+    #     ])
+    # else:
+    #     transformer = None
 
     loader = torch.utils.data.DataLoader(PredictVideoDataset(
         video_path,
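
Note on the transform change above: the dataloader now builds the model-input pipeline as ToTensor followed by Normalize (Normalize expects a float CHW tensor in [0, 1], not a raw uint8 frame), and only skips normalization in visualize mode. The snippet below is a minimal standalone sketch of that logic; the build_transform helper and the random test frame are illustrative, not part of the repository.

import numpy as np
from torchvision import transforms

# Hypothetical helper (not in the repo): rebuilds the transform pipeline that
# get_predict_video_dataloader now uses after this commit.
def build_transform(visualize_mode: bool = False) -> transforms.Compose:
    if visualize_mode:
        # Visualization only: convert to CHW float in [0, 1], keep pixel statistics.
        return transforms.Compose([transforms.ToTensor()])
    # Model input: ToTensor must come first, because Normalize operates on a
    # float tensor in [0, 1], not a uint8 HxWxC array.
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

if __name__ == "__main__":
    frame = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # stand-in video frame
    x = build_transform(visualize_mode=False)(frame)
    print(x.shape, x.dtype)  # torch.Size([3, 480, 640]) torch.float32
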
File dataset_script/generate_frame_from_video.py changed (mode: 100644) (index cec44e9..169b28d)
@@ -8,7 +8,7 @@ import os
 from visualize_util import save_density_map_normalize, save_density_map
 
 VIDEO_PATH = "/home/tt/Videos/VID_20201204_133931_404.mp4"
-OUTPUT_PATH = "/data/my_crowd_image/video_bike_20_q100"
+OUTPUT_PATH = "/data/my_crowd_image/video_bike_q100"
 v, a, info = read_video(VIDEO_PATH, pts_unit='sec')
 print(v.shape)
 length = v.shape[0]
@@ -16,9 +16,9 @@ print(length)
 
 count = 0
 for i in range(length):
-    if (i% 20 == 0):
-        frame = v[i]
-        frame = frame.permute(2, 0, 1)
-        file_out_path = os.path.join(OUTPUT_PATH, "IMG_" + str(i) + ".jpg")
-        write_jpeg(frame, file_out_path, quality=100)
-        print(file_out_path)
+    # if (i% 20 == 0):
+    frame = v[i]
+    frame = frame.permute(2, 0, 1)
+    file_out_path = os.path.join(OUTPUT_PATH, "IMG_" + str(i) + ".jpg")
+    write_jpeg(frame, file_out_path, quality=100)
+    print(file_out_path)
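
Note on the script change above: the every-20th-frame filter is commented out, so the script now writes every frame of the video to OUTPUT_PATH as a quality-100 JPEG. Below is a minimal standalone sketch of the same loop; the FRAME_STEP knob is an illustrative addition (not in the repository) that switches between the old and new behaviour.

import os
from torchvision.io import read_video, write_jpeg

VIDEO_PATH = "/home/tt/Videos/VID_20201204_133931_404.mp4"      # same paths as the script above
OUTPUT_PATH = "/data/my_crowd_image/video_bike_q100"
FRAME_STEP = 1  # 1 = keep every frame (new behaviour); 20 = the old every-20th-frame filter

os.makedirs(OUTPUT_PATH, exist_ok=True)
v, a, info = read_video(VIDEO_PATH, pts_unit='sec')  # v: (T, H, W, C) uint8 tensor
for i in range(0, v.shape[0], FRAME_STEP):
    frame = v[i].permute(2, 0, 1)                    # write_jpeg expects (C, H, W)
    file_out_path = os.path.join(OUTPUT_PATH, "IMG_" + str(i) + ".jpg")
    write_jpeg(frame, file_out_path, quality=100)
    print(file_out_path)
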
File predict_image.py copied from file demo_app/predict_only.py (similarity 54%) (mode: 100644) (index f3ee14f..94bd972)
@@ -4,6 +4,8 @@ from data_flow import get_predict_dataloader
 from models.dccnn import DCCNN
 from models.compact_cnn import CompactCNNV7
 from visualize_util import save_density_map_normalize, save_density_map
+from comet_ml import Experiment
+from args_util import meow_parse
 
 if __name__ == "__main__":
     """
@@ -11,33 +13,59 @@ if __name__ == "__main__":
     output into another folder
     output density map and count in csv
     """
-    NAME="adamw1_ccnnv7_t4_bike_prediction"
-    INPUT_FOLDER = "/data/my_crowd_image/dataset_batch1245/mybikedata/test_data/images/"
-    OUTPUT_FOLDER = "/data/my_crowd_image/dataset_batch1245/mybikedata/test_data/predicts/"
-    MODEL = "/data/save_model/adamw1_ccnnv7_t4_bike/adamw1_ccnnv7_t4_bike_checkpoint_valid_mae=-3.143752908706665.pth"
+
+    COMET_ML_API = "S3mM1eMq6NumMxk2QJAXASkUM"
+    PROJECT_NAME = "crowd-counting-debug"
+
+    experiment = Experiment(project_name=PROJECT_NAME, api_key=COMET_ML_API)
+
+    args = meow_parse()
+    video_path = args.input
+    OUTPUT_FOLDER = args.output
+    MODEL_PATH = args.load_model
+    model_name = args.model
+    NAME = args.task_id
+
+    experiment.set_name(args.task_id)
+    experiment.set_cmd_args()
+    experiment.log_text(args.note)
+
+    print(args)
+    n_thread = int(os.environ['OMP_NUM_THREADS'])
+    torch.set_num_threads(n_thread) # 4 thread
+    print("n_thread ", n_thread)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    print(device)
+
+    NAME = args.task_id
+    INPUT_FOLDER = args.input
+    OUTPUT_FOLDER = args.output
+    MODEL = args.model
     input_list = [os.path.join(INPUT_FOLDER, dir) for dir in os.listdir(INPUT_FOLDER)]
     loader = get_predict_dataloader(input_list)
     loaded_file = torch.load(MODEL)
     model = CompactCNNV7()
     model.load_state_dict(loaded_file['model'])
     model.eval()
+    model = model.to(device)
     os.makedirs(os.path.join(OUTPUT_FOLDER, NAME), exist_ok=True)
     log_file = open(os.path.join(OUTPUT_FOLDER, NAME, NAME +".log"), 'w')
-    limit_count = 100
+    # limit_count = 100
     count = 0
     for img, info in loader:
-        if count > limit_count:
-            break
+        # if count > limit_count:
+        #     break
         predict_name = "PRED_" + info["name"][0]
-
+        img = img.to(device)
         predict_path = os.path.join(OUTPUT_FOLDER, NAME, predict_name)
        pred = model(img)
-        pred = pred.detach().numpy()[0][0]
+        pred = pred.detach().cpu().numpy()[0][0]
         pred_count = pred.sum()
         log_line = info["name"][0] + "," + str(pred_count.item()) +"\n"
         log_file.write(log_line)
         save_density_map(pred, predict_path)
         torch.save(pred, predict_path+".torch")
         print("save to ", predict_path)
+        print(log_line)
         count += 1
     log_file.close()
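
Note on predict_image.py above: besides wiring in comet_ml logging and meow_parse arguments, the substantive fixes are moving the model and every input batch to the selected device and calling .detach().cpu().numpy() before the prediction is used on the CPU side. Below is a minimal sketch of that device-aware inference loop; model and loader are generic placeholders, not the repository's classes.

import torch

# Device-aware prediction loop in the spirit of predict_image.py above.
# `model` is any torch.nn.Module producing a density map; `loader` is any
# DataLoader yielding (img, info) pairs.
def predict(model: torch.nn.Module, loader) -> list:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.eval().to(device)          # move the weights to the device once
    counts = []
    with torch.no_grad():                    # no gradients needed at inference time
        for img, info in loader:
            img = img.to(device)             # every batch must live on the same device as the model
            pred = model(img)
            # .cpu() before .numpy(): numpy cannot read CUDA tensors
            density = pred.detach().cpu().numpy()[0][0]
            counts.append(float(density.sum()))
    return counts
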
File predict_script/generate_video/bike_video_frame_404_dccnn_t4.sh copied from file predict_script/dccnn/dccnn_bike_video_VID_20201204_133931_404_model_adamw1_bigtail13i_t4_bike20.sh (similarity 50%) (mode: 100644) (index 4466c5f..59cf470)
@@ -1,13 +1,16 @@
-task="eval_dccnn_bike_video_VID_20201204_133931_404_model_adamw1_bigtail13i_t4_bike20"
+task="bike_video_frame_404_dccnn_t4"
 # HTTPS_PROXY="http://10.60.28.99:86"
-CUDA_VISIBLE_DEVICES=5 OMP_NUM_THREADS=4 PYTHONWARNINGS="ignore" HTTPS_PROXY="http://10.60.28.99:86" nohup python predict_video_server.py \
+CUDA_VISIBLE_DEVICES=2 OMP_NUM_THREADS=2 PYTHONWARNINGS="ignore" HTTPS_PROXY="http://10.60.28.99:86" nohup python predict_image.py \
 --task_id $task \
---note "eval bike20 bad model" \
+--note "predict image frame 404, q100, model t4 " \
 --model "BigTail13i" \
---input /data/rnd/thient/thient_data/crowd_counting_video/raw_video/VID_20201204_133931_404.mp4 \
+--input /data/rnd/thient/thient_data/crowd_counting_video/video_frame/video_bike_q100 \
 --output /data/rnd/thient/thient_data/crowd_counting_video/predict_video/ \
+--eval_only \
 --batch_size 1 \
 --load_model /data/rnd/thient/crowd_counting_framework/saved_model_best/adamw1_bigtail13i_t4_bike20/adamw1_bigtail13i_t4_bike20_checkpoint_valid_mae=-3.2068.pt \
+--datasetname shanghaitech_non_overlap_test_with_densitygt \
+--eval_density \
 --epochs 1201 > logs/$task.log &
 
 echo logs/$task.log # for convenience