While reproducing an ESRGAN-related program, I ran into the following problem: libpng error: Read Error / AttributeError: 'NoneType' object has no attribute 'astype'

timmycheung666 · 2019-08-06 20:25:17 +08:00 · 1857 views
Traceback (most recent call last):
  File "/sda/ZTL/B/codes/train.py", line 173, in <module>
    main()
  File "/sda/ZTL/B/codes/train.py", line 97, in main
    for _, train_data in enumerate(train_loader):
  File "/root/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 637, in __next__
    return self._process_next_batch(batch)
  File "/root/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 658, in _process_next_batch
    raise batch.exc_type(batch.exc_msg)
AttributeError: Traceback (most recent call last):
  File "/root/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 138, in _worker_loop
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "/root/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 138, in <listcomp>
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "/sda/ZTL/B/data/LRHR_dataset.py", line 51, in __getitem__
    img_HR = util.read_img(self.HR_env, HR_path)
  File "/sda/ZTL/B/data/util.py", line 79, in read_img
    img = img.astype(np.float32) / 255.
AttributeError: 'NoneType' object has no attribute 'astype'

Here is the code at /sda/ZTL/B/data/util.py, line 79, in read_img:

def read_img(env, path):
    # read image by cv2 or from lmdb
    # return: Numpy float32, HWC, BGR, [0,1]
    if env is None:  # img
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    else:
        img = _read_lmdb_img(env, path)
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
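Note that cv2.imread does not raise on failure; it silently returns None for any file it cannot open or decode, which is why the crash only shows up one line later at img.astype(...). A small guard like the following (my own addition, not part of the original util.py) would at least report which file is broken:

    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        # cv2.imread returns None for missing, corrupt, or unsupported files;
        # failing fast with the path makes the bad dataset file easy to find.
        raise IOError('read_img: cv2.imread could not read {}'.format(path))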
I also located the calling code, at /sda/ZTL/B/data/LRHR_dataset.py, line 51, in __getitem__:
def __getitem__(self, index):
    HR_path, LR_path = None, None
    scale = self.opt['scale']
    HR_size = self.opt['HR_size']

    # get HR image
    HR_path = self.paths_HR[index]
    img_HR = util.read_img(self.HR_env, HR_path)
    # modcrop in the validation / test phase
    if self.opt['phase'] != 'train':
        img_HR = util.modcrop(img_HR, scale)
    # change color space if necessary
    if self.opt['color']:
        img_HR = util.channel_convert(img_HR.shape[2], self.opt['color'], [img_HR])[0]

    # get LR image
    if self.paths_LR:
        LR_path = self.paths_LR[index]
        img_LR = util.read_img(self.LR_env, LR_path)
    else:  # down-sampling on-the-fly
        # randomly scale during training
        if self.opt['phase'] == 'train':
            random_scale = random.choice(self.random_scale_list)
            H_s, W_s, _ = img_HR.shape

            def _mod(n, random_scale, scale, thres):
                rlt = int(n * random_scale)
                rlt = (rlt // scale) * scale
                return thres if rlt < thres else rlt

            H_s = _mod(H_s, random_scale, scale, HR_size)
            W_s = _mod(W_s, random_scale, scale, HR_size)
            img_HR = cv2.resize(np.copy(img_HR), (W_s, H_s), interpolation=cv2.INTER_LINEAR)
            # force to 3 channels
            if img_HR.ndim == 2:
                img_HR = cv2.cvtColor(img_HR, cv2.COLOR_GRAY2BGR)

        H, W, _ = img_HR.shape
        # using matlab imresize
        img_LR = util.imresize_np(img_HR, 1 / scale, True)
        if img_LR.ndim == 2:
            img_LR = np.expand_dims(img_LR, axis=2)

    if self.opt['phase'] == 'train':
        # if the image size is too small
        H, W, _ = img_HR.shape
        if H < HR_size or W < HR_size:
            img_HR = cv2.resize(
                np.copy(img_HR), (HR_size, HR_size), interpolation=cv2.INTER_LINEAR)
            # using matlab imresize
            img_LR = util.imresize_np(img_HR, 1 / scale, True)
            if img_LR.ndim == 2:
                img_LR = np.expand_dims(img_LR, axis=2)
        print(img_LR)

        H, W, C = img_LR.shape
        LR_size = HR_size // scale

        # randomly crop
        rnd_h = random.randint(0, max(0, H - LR_size))
        rnd_w = random.randint(0, max(0, W - LR_size))
        img_LR = img_LR[rnd_h:rnd_h + LR_size, rnd_w:rnd_w + LR_size, :]
        rnd_h_HR, rnd_w_HR = int(rnd_h * scale), int(rnd_w * scale)
        img_HR = img_HR[rnd_h_HR:rnd_h_HR + HR_size, rnd_w_HR:rnd_w_HR + HR_size, :]

        # augmentation - flip, rotate
        img_LR, img_HR = util.augment([img_LR, img_HR], self.opt['use_flip'], \
            self.opt['use_rot'])

        # change color space if necessary
        if self.opt['color']:
            img_LR = util.channel_convert(C, self.opt['color'], [img_LR])[0]  # TODO during val no definition

    # BGR to RGB, HWC to CHW, numpy to tensor
    if img_HR.shape[2] == 3:
        img_HR = img_HR[:, :, [2, 1, 0]]
        img_LR = img_LR[:, :, [2, 1, 0]]
    img_HR = torch.from_numpy(np.ascontiguousarray(np.transpose(img_HR, (2, 0, 1)))).float()
    img_LR = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LR, (2, 0, 1)))).float()

    if LR_path is None:
        LR_path = HR_path
    return {'LR': img_LR, 'HR': img_HR, 'LR_path': LR_path, 'HR_path': HR_path}
My feeling is that something goes wrong while reading the images: util.read_img(self.HR_env, HR_path) comes back without an image, but I don't know why, and I don't know how to deal with it. Could some expert please point me in the right direction? I am running this program on an NVIDIA Tesla P100 GPU.
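Before blaming the hardware, one quick check (my own suggestion; the path is a placeholder for the HR folder configured in the training options) is to sweep the HR image directory and list every file OpenCV cannot decode. The libpng error printed next to the traceback suggests at least one corrupt or truncated PNG is involved:

import glob
import os

import cv2

HR_DIR = '/path/to/HR/images'  # placeholder: the HR data folder used for training

bad = []
for path in sorted(glob.glob(os.path.join(HR_DIR, '*'))):
    # cv2.imread returns None (rather than raising) for corrupt, truncated,
    # or unsupported files - exactly the condition that crashes read_img().
    if cv2.imread(path, cv2.IMREAD_UNCHANGED) is None:
        bad.append(path)

print('{} unreadable file(s)'.format(len(bad)))
for path in bad:
    print(path)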
3 replies · last reply 2019-08-24 10:23:32 +08:00
#1 · timmycheung666 (OP) · 2019-08-10 13:07:34 +08:00
Can any expert please help me out with this?!!
#2 · timmycheung666 (OP) · 2019-08-19 20:25:07 +08:00
I'm begging you all, is there really nobody who can take a look at this?
#3 · timmycheung666 (OP) · 2019-08-24 10:23:32 +08:00
Hehe! Although nobody answered this, I solved it myself. It was mainly a dataset problem: some images were in the wrong format, and deleting the wrongly formatted images fixed it!
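For anyone who lands on the same error: the fix boils down to removing every file the data loader cannot actually decode. A sketch of such a clean-up pass is below; the two paths are placeholders, it moves suspicious files to a quarantine folder instead of deleting them outright, and the stdlib imghdr module is only used to flag files whose extension does not match their real format:

import glob
import imghdr
import os
import shutil

import cv2

HR_DIR = '/path/to/HR/images'        # placeholder: the dataset's HR folder
QUARANTINE = '/path/to/quarantine'   # placeholder: where unreadable files get moved

os.makedirs(QUARANTINE, exist_ok=True)

for path in sorted(glob.glob(os.path.join(HR_DIR, '*'))):
    if cv2.imread(path, cv2.IMREAD_UNCHANGED) is None:
        # Corrupt or truncated files - the ones behind "libpng error: Read Error".
        print('undecodable, quarantining: {}'.format(path))
        shutil.move(path, os.path.join(QUARANTINE, os.path.basename(path)))
        continue
    ext = os.path.splitext(path)[1].lower().lstrip('.')
    expected = 'jpeg' if ext == 'jpg' else ext
    kind = imghdr.what(path)  # real format read from the file header, e.g. 'png' or 'jpeg'
    if kind is not None and kind != expected:
        # Decodable, but the extension lies about the format (e.g. a JPEG renamed to .png).
        print('extension/format mismatch ({} vs .{}): {}'.format(kind, ext, path))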