
detectron2 Series: The config Package

2020-09-04 09:48
磐创AI

Configuration Reference

# -----------------------------------------------------------------------------

# Convention about Training / Test specific parameters

# -----------------------------------------------------------------------------

# Whenever an argument can be either used for training or for testing, the

# corresponding name will be post-fixed by a _TRAIN for a training parameter,

# or _TEST for a test-specific parameter.

# For example, the number of images during training will be

# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be

# IMAGES_PER_BATCH_TEST

# -----------------------------------------------------------------------------

# Config definition

# -----------------------------------------------------------------------------

_C = CN()

# The version number, to upgrade from old configs to new ones if any

# changes happen. It's recommended to keep a VERSION in your config file.

_C.VERSION = 2
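
In practice you rarely build this tree by hand: detectron2 exposes it through `get_cfg()`, which returns a clone of the defaults listed in this reference. A minimal sketch of the `_TRAIN`/`_TEST` convention and `VERSION` in use (the YAML filename is a placeholder):

```python
from detectron2.config import get_cfg

cfg = get_cfg()  # a clone of the defaults in this reference

# The _TRAIN / _TEST suffix convention in action:
cfg.INPUT.MIN_SIZE_TRAIN = (640, 800)  # training-only parameter
cfg.INPUT.MIN_SIZE_TEST = 800          # test-only parameter

# When loading a YAML config, its VERSION field lets detectron2
# upgrade keys from older schemas to the current one.
# cfg.merge_from_file("my_config.yaml")  # placeholder path
print(cfg.VERSION)  # 2
```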

_C.MODEL = CN()

_C.MODEL.LOAD_PROPOSALS = False

_C.MODEL.MASK_ON = False

_C.MODEL.KEYPOINT_ON = False

_C.MODEL.DEVICE = "cuda"

_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"

# Path (possibly with schema like catalog:// or detectron2://) to a checkpoint file

# to be loaded to the model. You can find available models in the model zoo.

_C.MODEL.WEIGHTS = ""
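
For example, WEIGHTS can point at a released model zoo checkpoint; `model_zoo.get_checkpoint_url` resolves a config name to its weights URL. A short sketch using the standard Mask R-CNN R50-FPN entry:

```python
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
# Resolve a model zoo config name to the URL of its released checkpoint:
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)
```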

# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).

# To train on images with a different number of channels, just set different mean & std values.

# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]

_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]

# When using pre-trained models in Detectron1 or any MSRA models,

# std has been absorbed into its conv1 weights, so the std needs to be set to 1.

# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)

_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
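
These two lists are applied per channel before the backbone runs. A minimal sketch of the arithmetic the model applies to each image (mirroring GeneralizedRCNN's preprocessing, as I read it):

```python
import torch

pixel_mean = torch.tensor([103.530, 116.280, 123.675]).view(3, 1, 1)
pixel_std = torch.tensor([1.0, 1.0, 1.0]).view(3, 1, 1)

image = torch.rand(3, 480, 640) * 255.0   # CHW, BGR order per INPUT.FORMAT
normalized = (image - pixel_mean) / pixel_std
```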

# -----------------------------------------------------------------------------

# INPUT

# -----------------------------------------------------------------------------

_C.INPUT = CN()

# Size of the smallest side of the image during training

_C.INPUT.MIN_SIZE_TRAIN = (800,)

# Sample the size of the smallest side by choice or by random selection from the range given by

# INPUT.MIN_SIZE_TRAIN

_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"

# Maximum size of the side of the image during training

_C.INPUT.MAX_SIZE_TRAIN = 1333

# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.

_C.INPUT.MIN_SIZE_TEST = 800

# Maximum size of the side of the image during testing

_C.INPUT.MAX_SIZE_TEST = 1333
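
Together these four keys implement shortest-edge resizing with a long-side cap. A standalone sketch of the size computation (it mirrors `ResizeShortestEdge`; the exact rounding may differ):

```python
def resized_hw(h, w, min_size=800, max_size=1333):
    # Scale so the shorter side becomes min_size, then shrink further
    # if that would push the longer side past max_size.
    scale = min_size / min(h, w)
    if max(h, w) * scale > max_size:
        scale = max_size / max(h, w)
    return int(h * scale + 0.5), int(w * scale + 0.5)

# The 800-short-side scale would make the long side 1600 > 1333,
# so the max-size cap wins:
print(resized_hw(1000, 2000))  # (667, 1333)
```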

# `True` if cropping is used for data augmentation during training

_C.INPUT.CROP = CN({"ENABLED": False})

# Cropping type:

# - "relative" crop (H * CROP.SIZE[0], W * CROP.SIZE[1]) part of an input of size (H, W)

# - "relative_range" uniformly sample relative crop size from between [CROP.SIZE[0], [CROP.SIZE[1]].

#   and  [1, 1] and use it as in "relative" scenario.

# - "absolute" crop part of an input with absolute size: (CROP.SIZE[0], CROP.SIZE[1]).

_C.INPUT.CROP.TYPE = "relative_range"

# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of

# pixels if CROP.TYPE is "absolute"

_C.INPUT.CROP.SIZE = [0.9, 0.9]
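
A numerical sketch of the "relative_range" mode (a hypothetical standalone function, not detectron2's own code): each axis samples a relative size uniformly between CROP.SIZE and 1.0, then crops that fraction of the input as in "relative" mode.

```python
import numpy as np

def sample_relative_range_crop(h, w, crop_size=(0.9, 0.9)):
    ch, cw = np.random.uniform(low=crop_size, high=1.0, size=2)
    return int(h * ch + 0.5), int(w * cw + 0.5)

# With CROP.SIZE = [0.9, 0.9], each crop keeps 90-100% of each dimension.
print(sample_relative_range_crop(480, 640))
```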

# Whether the model needs RGB, YUV, HSV etc.

# Should be one of the modes defined here, as we use PIL to read the image:

# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes

# with BGR being the one exception: if the image format is set to BGR, the

# image is read internally in RGB and the channels are then flipped to BGR.

_C.INPUT.FORMAT = "BGR"

# The ground truth mask format that the model will use.

# Mask R-CNN supports either "polygon" or "bitmask" as ground truth.

_C.INPUT.MASK_FORMAT = "polygon"  # alternative: "bitmask"

# -----------------------------------------------------------------------------

# Dataset

# -----------------------------------------------------------------------------

_C.DATASETS = CN()

# List of the dataset names for training. Must be registered in DatasetCatalog

_C.DATASETS.TRAIN = ()

# List of the pre-computed proposal files for training, which must be consistent

# with datasets listed in DATASETS.TRAIN.

_C.DATASETS.PROPOSAL_FILES_TRAIN = ()

# Number of top scoring precomputed proposals to keep for training

_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000

# List of the dataset names for testing. Must be registered in DatasetCatalog

_C.DATASETS.TEST = ()

# List of the pre-computed proposal files for test, which must be consistent

# with datasets listed in DATASETS.TEST.

_C.DATASETS.PROPOSAL_FILES_TEST = ()

# Number of top scoring precomputed proposals to keep for test

_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
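
The dataset names here are just registry keys. A minimal sketch of registering a custom dataset and pointing the config at it (the name "my_dataset_train" and the loader are illustrative only):

```python
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_dataset():
    # Must return list[dict] in detectron2's standard dataset format.
    return [{"file_name": "img0.jpg", "height": 480, "width": 640,
             "annotations": []}]

DatasetCatalog.register("my_dataset_train", load_my_dataset)  # hypothetical name
MetadataCatalog.get("my_dataset_train").thing_classes = ["widget"]

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("my_dataset_train",)
```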

# -----------------------------------------------------------------------------

# DataLoader

# -----------------------------------------------------------------------------

_C.DATALOADER = CN()

# Number of data loading threads

_C.DATALOADER.NUM_WORKERS = 4

# If True, each batch should contain only images for which the aspect ratio

# is compatible. This groups portrait images together, and landscape images

# are not batched with portrait images.

_C.DATALOADER.ASPECT_RATIO_GROUPING = True

# Options: TrainingSampler, RepeatFactorTrainingSampler

_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"

# Repeat threshold for RepeatFactorTrainingSampler

_C.DATALOADER.REPEAT_THRESHOLD = 0.0

# If True, the dataloader will filter out images that have no associated

# annotations at train time.

_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
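
REPEAT_THRESHOLD follows the LVIS repeat-factor scheme that this sampler implements, as I understand it: a category present in fraction f(c) of images gets repeat factor max(1, sqrt(t / f(c))), and each image takes the max over its categories. A sketch of the per-category factor:

```python
import math

def category_repeat_factor(image_frequency, threshold):
    # Categories rarer than `threshold` are oversampled; common ones stay at 1x.
    return max(1.0, math.sqrt(threshold / image_frequency))

# With REPEAT_THRESHOLD = 0.001, a category present in 0.01% of images
# is repeated ~3.16x:
print(category_repeat_factor(0.0001, 0.001))
```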

# ---------------------------------------------------------------------------- #

# Backbone options

# ---------------------------------------------------------------------------- #

_C.MODEL.BACKBONE = CN()

_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"

# Freeze the first several stages so they are not trained.

# There are 5 stages in ResNet. The first is a convolution, and the following

# stages are each a group of residual blocks.

_C.MODEL.BACKBONE.FREEZE_AT = 2
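
Concretely, with the 5-stage numbering above (stem = stage 1, res2-res5 = stages 2-5), a short sketch of common settings:

```python
from detectron2.config import get_cfg

cfg = get_cfg()
# Freeze the stem and res2, train res3-res5 (the default):
cfg.MODEL.BACKBONE.FREEZE_AT = 2
# Fine-tune the whole backbone instead:
# cfg.MODEL.BACKBONE.FREEZE_AT = 0
```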

# ---------------------------------------------------------------------------- #

# FPN options

# ---------------------------------------------------------------------------- #

_C.MODEL.FPN = CN()

# Names of the input feature maps to be used by FPN

# They must have contiguous power of 2 strides

# e.g., ["res2", "res3", "res4", "res5"]

_C.MODEL.FPN.IN_FEATURES = []

_C.MODEL.FPN.OUT_CHANNELS = 256

# Options: "" (no norm), "GN"

_C.MODEL.FPN.NORM = ""

# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"

_C.MODEL.FPN.FUSE_TYPE = "sum"
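
A typical ResNet-50-FPN setup under these keys (the values mirror detectron2's Base-RCNN-FPN.yaml):

```python
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.MODEL.BACKBONE.NAME = "build_resnet_fpn_backbone"
cfg.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]  # strides 4, 8, 16, 32
cfg.MODEL.FPN.OUT_CHANNELS = 256
cfg.MODEL.FPN.FUSE_TYPE = "sum"  # add top-down and lateral maps; "avg" averages them
```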

# ---------------------------------------------------------------------------- #

# Proposal generator options

# ---------------------------------------------------------------------------- #

_C.MODEL.PROPOSAL_GENERATOR = CN()

# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"

_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"

# Proposal height and width both need to be greater than MIN_SIZE

# (at the scale used during training or inference)

_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0

# ---------------------------------------------------------------------------- #

# Anchor generator options

# ---------------------------------------------------------------------------- #

_C.MODEL.ANCHOR_GENERATOR = CN()

# The generator can be any name in the ANCHOR_GENERATOR registry

_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"

# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.

# Format: list[list[int]]. SIZES[i] specifies the list of sizes

# to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,

# or len(SIZES) == 1 is true and size list SIZES[0] is used for all

# IN_FEATURES.

_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]

# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect

# ratios are generated by an anchor generator.

# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios

# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,

# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used

# for all IN_FEATURES.

_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]

# Anchor angles.

# Format: list[list[float]], the angle in degrees, for each input feature map.

# ANGLES[i] specifies the list of angles for IN_FEATURES[i].

_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]

# Relative offset between the center of the first anchor and the top-left corner of the image

# Units: fraction of feature map stride (e.g., 0.5 means half stride)

# Allowed values are floats in the half-open range [0, 1).

# Recommended value is 0.5, although it is not expected to affect model accuracy.

_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
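
Each feature map location gets len(SIZES[i]) * len(ASPECT_RATIOS[i]) anchors; each anchor preserves the area size**2 while the ratio sets height/width. A standalone sketch of that math (mirroring DefaultAnchorGenerator's cell-anchor computation, up to centering):

```python
import math

def cell_anchor_hw(size, aspect_ratio):
    # Preserve area (w * h == size**2) with h / w == aspect_ratio.
    area = size ** 2
    w = math.sqrt(area / aspect_ratio)
    h = aspect_ratio * w
    return h, w

# size=32, ratio=0.5: a short, wide anchor with the same area as a 32x32 box.
print(cell_anchor_hw(32, 0.5))  # ≈ (22.6, 45.3)
```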

# ---------------------------------------------------------------------------- #

# RPN options

# ---------------------------------------------------------------------------- #

_C.MODEL.RPN = CN()

_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead"  # used by RPN_HEAD_REGISTRY

# Names of the input feature maps to be used by RPN

# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN

_C.MODEL.RPN.IN_FEATURES = ["res4"]

# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels

# Set to -1 or a large value, e.g. 100000, to disable pruning anchors

_C.MODEL.RPN.BOUNDARY_THRESH = -1

# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]

# Minimum overlap required between an anchor and ground-truth box for the

# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD

# ==> positive RPN example: 1)

# Maximum overlap allowed between an anchor and ground-truth box for the

# (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD

# ==> negative RPN example: 0)

# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)

# are ignored (-1)

_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]

_C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
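
The two lists define a piecewise labeling over IoU, exactly as the comment above describes; a standalone sketch (not detectron2's Matcher itself):

```python
def rpn_label(iou, thresholds=(0.3, 0.7), labels=(0, -1, 1)):
    if iou < thresholds[0]:
        return labels[0]   # negative (background)
    if iou < thresholds[1]:
        return labels[1]   # ignored
    return labels[2]       # positive (foreground)

print([rpn_label(x) for x in (0.1, 0.5, 0.9)])  # [0, -1, 1]
```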

# Total number of RPN examples per image

_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256

# Target fraction of foreground (positive) examples per RPN minibatch

_C.MODEL.RPN.POSITIVE_FRACTION = 0.5

# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets

_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)

# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.

_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0

_C.MODEL.RPN.LOSS_WEIGHT = 1.0

# Number of top scoring RPN proposals to keep before applying NMS

# When FPN is used, this is *per FPN level* (not total)

_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000

_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000

# Number of top scoring RPN proposals to keep after applying NMS

# When FPN is used, this limit is applied per level and then again to the union

# of proposals from all levels

# NOTE: When FPN is used, the meaning of this config is different from Detectron1.

# It means per-batch topk in Detectron1, but per-image topk here.

# See "modeling/rpn/rpn_outputs.py" for details.

_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000

_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000

# NMS threshold used on RPN proposals

_C.MODEL.RPN.NMS_THRESH = 0.7
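
A standalone sketch of how the three knobs above interact for one image (using torchvision's NMS; detectron2's actual implementation differs in details such as per-level handling under FPN):

```python
import torch
from torchvision.ops import nms

def select_proposals(boxes, scores, pre_topk=6000, post_topk=1000, nms_thresh=0.7):
    # Keep the top-scoring candidates before NMS (PRE_NMS_TOPK_TEST)...
    idx = scores.topk(min(pre_topk, len(scores))).indices
    boxes, scores = boxes[idx], scores[idx]
    # ...suppress boxes overlapping above NMS_THRESH, then cap the
    # survivors per image (POST_NMS_TOPK_TEST).
    keep = nms(boxes, scores, nms_thresh)[:post_topk]
    return boxes[keep], scores[keep]
```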

# ---------------------------------------------------------------------------- #
