/args_util.py (8eb8fa4aebc1a390464618e803794fc5a15a1400) (8408 bytes) (mode 100644) (type blob)

"""
contain dummy args with config
helpfull for copy paste Kaggle
"""
import argparse
from hard_code_variable import HardCodeVariable

def make_args(gpu="0", task="task_one_"):
    """
    these arg does not have any required commandline arg (all with default value)
    :param train_json:
    :param test_json:
    :param pre:
    :param gpu:
    :param task:
    :return:
    """
    parser = argparse.ArgumentParser(description='PyTorch CSRNet')

    args = parser.parse_args()
    args.gpu = gpu
    args.task = task
    args.pre = None
    return args

class Meow:
    """Bare attribute bag: an empty object that callers attach fields to."""

    def __init__(self):
        # Nothing to initialise here; attributes are set after construction.
        pass


def make_meow_args(gpu="0", task="task_one_"):
    args = Meow()
    args.gpu = gpu
    args.task = task
    args.pre = None
    return args


def like_real_args_parse(data_input):
    """
    Build a Meow namespace pre-filled with the hard-coded training
    hyper-parameters that a real command-line parser would provide.

    :param data_input: dataset path, stored on the namespace as ``input``
    :return: Meow instance with lr, batch_size, epochs, etc. populated
    """
    args = Meow()
    args.input = data_input
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 120
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.print_freq = 30
    # BUG FIX: the original function never returned, so callers always got None.
    return args


def context_aware_network_args_parse():
    """
    Real command-line parser for the Context Aware Network trainer.

    This is not a dummy parser; skip it when building an all-in-one notebook.

    :return: parsed argparse namespace
    """
    parser = argparse.ArgumentParser(description='CrowdCounting Context Aware Network')

    # identification / misc flags (action="store" is argparse's default)
    parser.add_argument("--task_id", default="dev")
    parser.add_argument('-a', action="store_true", default=False)

    # paths and dataset selection
    parser.add_argument('--input', type=str, default=HardCodeVariable().SHANGHAITECH_PATH_PART_A)
    parser.add_argument('--output', type=str, default="saved_model/context_aware_network")
    parser.add_argument('--datasetname', default="shanghaitech_keepfull")

    # training hyper-parameters, all with defaults
    parser.add_argument('--load_model', type=str, default="")
    parser.add_argument('--lr', type=float, default=1e-8)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--decay', type=float, default=5*1e-3)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--test', action="store_true", default=False)

    return parser.parse_args()


def my_args_parse():
    """
    Command-line parser for the main training scripts.

    :return: parsed argparse namespace
    """
    parser = argparse.ArgumentParser(description='CrowdCounting Context Aware Network')

    # run identification (action="store" is argparse's default and is omitted)
    parser.add_argument("--task_id", default="dev")
    parser.add_argument('--note', default="write anything")

    # data selection
    parser.add_argument('--input', type=str, default=HardCodeVariable().SHANGHAITECH_PATH_PART_A)
    parser.add_argument('--datasetname', default="shanghaitech_keepfull")

    # training hyper-parameters, all with defaults
    parser.add_argument('--load_model', type=str, default="")
    parser.add_argument('--lr', type=float, default=1e-8)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--decay', type=float, default=5*1e-3)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=1,
                        help="only set batch_size > 0 for dataset with image size equal")
    parser.add_argument('--test', action="store_true", default=False)
    parser.add_argument('--no_norm', action="store_true", default=False,
                        help="if true, does not use transforms.Normalize in dataloader")

    return parser.parse_args()


def meow_parse():
    """
    Full-featured command-line parser (model choice, caching, loss/optimizer
    selection) for the training scripts.

    :return: parsed argparse namespace
    """
    parser = argparse.ArgumentParser(description='CrowdCounting Context Aware Network')

    # run identification (action="store" is argparse's default and is omitted)
    parser.add_argument("--task_id", default="dev")
    parser.add_argument("--model", default="dev")
    parser.add_argument('--note', default="write anything")

    # data selection
    parser.add_argument('--input', type=str, default=HardCodeVariable().SHANGHAITECH_PATH_PART_A)
    parser.add_argument('--datasetname', default="shanghaitech_keepfull")

    # training hyper-parameters, all with defaults
    parser.add_argument('--load_model', type=str, default="")
    parser.add_argument('--lr', type=float, default=1e-8)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--decay', type=float, default=5*1e-3)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=1,
                        help="only set batch_size > 0 for dataset with image size equal")

    # boolean switches
    parser.add_argument('--test', action="store_true", default=False)
    parser.add_argument('--no_norm', action="store_true", default=False,
                        help="if true, does not use transforms.Normalize in dataloader")
    parser.add_argument('--cache', action="store_true", default=False,
                        help="use cache for dataloader, recommend True if the data does not change every epoch")
    parser.add_argument('--pin_memory', action="store_true", default=False,
                        help="don't know what is it")
    parser.add_argument('--skip_train_eval', action="store_true", default=False,
                        help="if true, do not run eval on training set to save time")

    # loss / optimizer selection
    parser.add_argument('--loss_fn', default="MSE", type=str)
    parser.add_argument('--optim', default="adam", type=str)
    parser.add_argument('--eval_only', action="store_true", default=False)

    return parser.parse_args()


def sanity_check_dataloader_parse():
    """Minimal CLI parser for the dataloader sanity-check script."""
    parser = argparse.ArgumentParser(description='Dataloader')
    parser.add_argument('--input', type=str, default=HardCodeVariable().SHANGHAITECH_PATH_PART_A)
    parser.add_argument('--datasetname', default="shanghaitech_keepfull")
    return parser.parse_args()


def train_test_split_parse():
    """CLI parser for the train/test split utility: a single --input path."""
    parser = argparse.ArgumentParser(description='Dataloader')
    parser.add_argument('--input', type=str, default=HardCodeVariable().SHANGHAITECH_PATH_PART_A)
    return parser.parse_args()


def real_args_parse():
    """
    Real command-line parser (PACNN-era) for the training scripts.

    This is not a dummy parser; skip it when building an all-in-one notebook.

    :return: parsed argparse namespace
    """
    parser = argparse.ArgumentParser(description='CrowdCounting')

    # run identification (action="store" is argparse's default and is omitted)
    parser.add_argument("--task_id", default="dev")
    parser.add_argument('-a', action="store_true", default=False)

    # paths, dataset and model selection
    parser.add_argument('--input', type=str, default=HardCodeVariable().SHANGHAITECH_PATH_PART_A)
    parser.add_argument('--output', type=str, default="saved_model")
    parser.add_argument('--model', default="pacnn")

    # training hyper-parameters, all with defaults
    parser.add_argument('--load_model', type=str, default="")
    parser.add_argument('--lr', type=float, default=1e-8)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--decay', type=float, default=5*1e-3)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--test', action="store_true", default=False)

    # PACNN-specific switches
    parser.add_argument('--PACNN_PERSPECTIVE_AWARE_MODEL', type=int, default=0)
    parser.add_argument('--PACNN_MUTILPLE_SCALE_LOSS', type=int, default=1,
                        help="1: compare each of  density map/perspective map scale with gt for loss."
                             "0: only compare final density map and final density perspective map")

    return parser.parse_args()

Mode Type Size Ref File
100644 blob 61 169fe2b7d512a59cfedf86ddb7ed040173c7434d .gitignore
100644 blob 1342 f2eb3073ff4a8536cf4e8104ff942b525e3c7f34 .travis.yml
100644 blob 1255 1dfa426237bc174a2ba2186240191a6b7041bc86 README.md
100644 blob 8408 8eb8fa4aebc1a390464618e803794fc5a15a1400 args_util.py
040000 tree - 5e9d7f0e1fd3a9e4d5a37f3d6de0c3ecd3125af8 backup_notebook
040000 tree - 55d1d196f5b6ed4bfc1e8a715df1cfff1dd18117 bug
100644 blob 3591 7b4c18e8cf2c0417cd13d3f77ea0571c9e0e493f crowd_counting_error_metrics.py
100644 blob 48901 26d0022f5dbe73fd0cc063d7c645579a435acad9 data_flow.py
040000 tree - 7b2560d2cb223bf0574eb278bafeda5a8577c7db data_util
040000 tree - e4cfcbf81993d179063f70f45b58b4e2c49dff4d dataset_script
040000 tree - 11b308d7571c6fd89345da40967301d8ca515100 debug
040000 tree - 9862b9cbc6e7a1d43565f12d85d9b17d1bf1814e env_file
100644 blob 4460 9b254c348a3453f4df2c3ccbf21fb175a16852de eval_context_aware_network.py
100644 blob 428 35cc7bfe48a4ed8dc56635fd3a6763612d8af771 evaluator.py
100644 blob 16625 74787c714584bf4c7aa5e0fb87a9776576d1239e experiment_main.py
100644 blob 8876 049432d6bde50245a4acba4e116d59605b5b6315 experiment_meow_main.py
100644 blob 1916 1d228fa4fa2887927db069f0c93c61a920279d1f explore_model_summary.py
100644 blob 2718 b09b84e8b761137654ba6904669799c4866554b3 hard_code_variable.py
040000 tree - b3aa858a157f5e1e22c00fdb6f9dd071f4c6c163 local_train_script
040000 tree - 927d159228536a86499de8a294700f8599b8a60b logs
100644 blob 15300 cb90faba0bd4a45f2606a1e60975ed05bfacdb07 main_pacnn.py
100644 blob 2760 3c2d5ba1c81ef2770ad216c566e268f4ece17262 main_shanghaitech.py
100644 blob 2683 29189260c1a2c03c8e59cd0b4bd61df19d5ce098 main_ucfcc50.py
100644 blob 2794 f37b3bb572c53dd942c51243bd5b0853228c6ddb model_util.py
040000 tree - 3e68f1cb103228fc5e5d22db43874f853152bb39 models
100644 blob 870 8f5ce4f7e0b168add5ff2a363faa973a5b56ca48 mse_l1_loss.py
100644 blob 1066 811554259182e63240d7aa9406f315377b3be1ac mse_ssim_loss.py
040000 tree - 2cc497edce5da8793879cc5e82718d1562ef17e8 playground
040000 tree - c7c295e9e418154ae7c754dc888a77df8f50aa61 pytorch_ssim
100644 blob 1727 1cd14cbff636cb6145c8bacf013e97eb3f7ed578 sanity_check_dataloader.py
040000 tree - a1e8ea43eba8a949288a00fff12974aec8692003 saved_model_best
100644 blob 3525 27067234ad3deddd743dcab0d7b3ba4812902656 train_attn_can_adcrowdnet.py
100644 blob 3488 e47bfc7e91c46ca3c61be0c5258302de4730b06d train_attn_can_adcrowdnet_freeze_vgg.py
100644 blob 5352 3ee3269d6fcc7408901af46bed52b1d86ee9818c train_attn_can_adcrowdnet_simple.py
100644 blob 5728 90b846b68f15bdc58e3fd60b41aa4b5d82864ec4 train_attn_can_adcrowdnet_simple_lrscheduler.py
100644 blob 9081 664051f8838434c386e34e6dd6e6bca862cb3ccd train_compact_cnn.py
100644 blob 5702 fdec7cd1ee062aa4a2182a91e2fb1bd0db3ab35f train_compact_cnn_lrscheduler.py
100644 blob 5611 2a241c876015db34681d73ce534221de482b0b90 train_compact_cnn_sgd.py
100644 blob 3525 eb52f7a4462687c9b2bf1c3a887014c4afefa26d train_context_aware_network.py
100644 blob 5651 48631e36a1fdc063a6d54d9206d2fd45521d8dc8 train_custom_compact_cnn.py
100644 blob 5594 07d6c9c056db36082545b5b60b1c00d9d9f6396d train_custom_compact_cnn_lrscheduler.py
100644 blob 5281 8a92eb87b54f71ad2a799a7e05020344a22e22d3 train_custom_compact_cnn_sgd.py
040000 tree - 16528a64dd0d3af266e22f899cd8b74d99c4fcdc train_script
100644 blob 6595 5b8afd4fb322dd7cbffd1a589ff5276b0e3edeb5 visualize_data_loader.py
100644 blob 1146 1b0f845587f0f37166d44fa0c74b51f89cf8b349 visualize_util.py
Hints:
Before first commit, do not forget to setup your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main