TF2.0 API Learning (Python) Part 8: Reading the Dataset Class in dataset.py

dataset.py

Definition and implementation of the Dataset class

import numpy as np

# cfg and utils come from the project's own modules (core/config.py and
# core/utils.py in the original repo layout); adjust the import paths to your setup.
from core.config import cfg
import core.utils as utils


class Dataset(object):
    """implement Dataset here"""
    def __init__(self, dataset_type):
        self.annot_path  = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH
        self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE
        self.batch_size  = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE
        self.data_aug    = cfg.TRAIN.DATA_AUG   if dataset_type == 'train' else cfg.TEST.DATA_AUG

        self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.max_bbox_per_scale = 150

        self.annotations = self.load_annotations(dataset_type)
        self.num_samples = len(self.annotations)
        self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
        self.batch_count = 0

    def __len__(self):
        return self.num_batchs
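
A minimal usage sketch, assuming the project's config (annotation paths, batch size, etc.) is already in place; 'train' selects the training-side settings shown above:

trainset = Dataset('train')
print(len(trainset))   # number of batches per epoch = ceil(num_samples / batch_size)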

1. tf.device

We can use tf.device() to pin computation to a specific device: CPU or GPU, and, when several GPUs are available, which particular GPU to run on.
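
A small standalone sketch of tf.device in TF2 (not taken from dataset.py; the tensors are placeholders for illustration):

import tensorflow as tf

# Pin the computation to the first CPU; use '/GPU:0', '/GPU:1', ... to pick a GPU.
with tf.device('/CPU:0'):
    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    b = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    c = tf.matmul(a, b)

print(c.device)   # e.g. /job:localhost/replica:0/task:0/device:CPU:0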

2. random.choice

Returns a random element from a non-empty sequence such as a list, tuple, or string.
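
For illustration (the candidate sizes below are made-up values, similar in spirit to how the project picks a random training input size):

import random

input_sizes = [320, 416, 608]
print(random.choice(input_sizes))   # one randomly picked element, e.g. 416
print(random.choice('abc'))         # also works on strings, e.g. 'b'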

3. random.shuffle

Shuffles all elements of a mutable sequence in place (it modifies the list and returns None).
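
A short illustrative run (the file names are made up):

import random

annotations = ['img_001.jpg', 'img_002.jpg', 'img_003.jpg']
random.shuffle(annotations)   # in-place shuffle; the return value is None
print(annotations)            # e.g. ['img_002.jpg', 'img_001.jpg', 'img_003.jpg']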

4. Label Smoothing

Label smoothing softens the traditional one-hot labels so that overfitting is suppressed when the loss is computed. It effectively reduces the weight of the ground-truth class in the loss, which in turn curbs overfitting.

for bbox in bboxes:
    # ground-truth box coordinates [x_min, y_min, x_max, y_max]
    bbox_coor = bbox[:4]
    # class index
    bbox_class_ind = bbox[4]
    # convert the class index to a one-hot vector
    # (np.float is deprecated in recent NumPy; use the builtin float instead)
    onehot = np.zeros(self.num_classes, dtype=float)
    onehot[bbox_class_ind] = 1.0
    # label smoothing
    uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes)
    deta = 0.01
    smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution
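
A standalone run of the same formula, with num_classes = 4 chosen only for illustration:

import numpy as np

num_classes = 4
onehot = np.zeros(num_classes)
onehot[2] = 1.0                                  # ground-truth class is index 2
uniform_distribution = np.full(num_classes, 1.0 / num_classes)
deta = 0.01
print(onehot * (1 - deta) + deta * uniform_distribution)
# [0.0025 0.0025 0.9925 0.0025]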

5. np.floor

Returns the largest integer less than or equal to the input, as a float (i.e. it rounds toward negative infinity).
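
A few quick calls to show the behavior:

import numpy as np

print(np.floor(3.7))               # 3.0
print(np.floor(-2.1))              # -3.0, rounds toward negative infinity
print(np.floor([1.5, 2.0, 2.9]))   # [1. 2. 2.]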

6. np.concatenate

Joins a sequence of arrays along an existing axis; it behaves much like tf.concat.
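
An illustrative call along two different axes:

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
print(np.concatenate([a, b], axis=0))
# [[1 2]
#  [3 4]
#  [5 6]]
print(np.concatenate([a, a], axis=-1))
# [[1 2 1 2]
#  [3 4 3 4]]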

7. np.logical_or

Here it is used to filter out boxes with invalid (overflowed) coordinates, i.e. boxes where x_min > x_max or y_min > y_max:

import numpy as np

pred_coor = [[1, 2, 3, 4],
             [3, 4, 2, 3],
             [5, 6, 7, 8]]
pre = np.array(pred_coor)
# True where x_min > x_max or y_min > y_max, i.e. the box is invalid
mask = np.logical_or((pre[:, 0] > pre[:, 2]), (pre[:, 1] > pre[:, 3]))
print(mask)     # [False  True False]
pre[mask] = 0   # zero out the invalid boxes
print(pre)
# [[1 2 3 4]
#  [0 0 0 0]
#  [5 6 7 8]]

8. np.argmax

Returns the index of the maximum value (along a given axis).
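
A short illustration, including the per-row case as one would use on an IoU-like array (the numbers are arbitrary):

import numpy as np

scores = np.array([0.1, 0.7, 0.2])
print(np.argmax(scores))          # 1

ious = np.array([[0.2, 0.6, 0.3],
                 [0.8, 0.1, 0.4]])
print(np.argmax(ious, axis=-1))   # [1 0] -- index of the best value in each row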

# Fragments from Dataset.preprocess_true_boxes, annotated:

# Convert [x_min, y_min, x_max, y_max] to [center_x, center_y, width, height]
bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5,
                            bbox_coor[2:] - bbox_coor[:2]], axis=-1)

# One label tensor per output scale:
# shape = (grid_size, grid_size, anchor_per_scale, 5 + num_classes)
label = [np.zeros((self.train_output_sizes[i], self.train_output_sizes[i],
                   self.anchor_per_scale, 5 + self.num_classes)) for i in range(3)]

# Up to max_bbox_per_scale (= 150) ground-truth boxes are stored per scale
bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)]

if np.any(iou_mask):
    # Grid cell that contains the box center on scale i
    xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32)

    # Write the box coordinates, objectness = 1 and the smoothed one-hot class label
    label[i][yind, xind, iou_mask, :] = 0
    label[i][yind, xind, iou_mask, 0:4] = bbox_xywh
    label[i][yind, xind, iou_mask, 4:5] = 1.0
    label[i][yind, xind, iou_mask, 5:] = smooth_onehot

    # bbox_count = np.zeros((3,)) keeps a running count of boxes per scale
    bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale)
    bboxes_xywh[i][bbox_ind, :4] = bbox_xywh


Reprinted from blog.csdn.net/Winds_Up/article/details/113413025