Using h5py and Code for Packaging Datasets

Author: 沈子恒 (Shen Ziheng)  Date: 2021-10-22 08:45:39

1. A brief introduction to h5py

An h5py (HDF5) file is a container for two kinds of objects: datasets and groups. A dataset is an array-like collection of data, much like a numpy array. A group is a folder-like container that behaves like a Python dictionary, with keys and values, and it can hold datasets or other groups. The "keys" are the names of the group members, and the "values" are the member objects themselves (groups or datasets). Let's look at how to create groups and datasets.
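As a quick illustration of this group/dataset hierarchy, here is a minimal sketch (the names mygroup and dset_in_group are just examples, not from the original article):

import h5py
import numpy as np

with h5py.File("demo_group.hdf5", "w") as f:
    g = f.create_group("mygroup")                       # a group works like a folder/dict
    g.create_dataset("dset_in_group", data=np.ones(5))  # datasets can live inside groups
    for key in f.keys():                                # "keys" are the member names
        print(key, f[key])                              # "values" are the member objects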

1.1 Creating an h5py file


import h5py

# To read an existing file, replace "w" with "r"
f = h5py.File("myh5py.hdf5", "w")

This generates a myh5py.hdf5 file in the current directory.
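To read the file back, just switch the mode to "r" as the comment above notes; wrapping the file in a with block ensures it is closed properly. A minimal sketch (the freshly created file is still empty, so the key list prints as []):

import h5py

# "r" opens an existing file read-only; "a" would open for read/write, creating it if needed
with h5py.File("myh5py.hdf5", "r") as f:
    print(list(f.keys()))   # list the members stored at the root group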

1.2 Creating a dataset


import h5py

f = h5py.File("myh5py.hdf5", "w")
# "dset1" is the dataset name, (20,) is its shape, and 'i' is the element type (integer)
d1 = f.create_dataset("dset1", (20,), 'i')
for key in f.keys():
    print(key)
    print(f[key].name)
    print(f[key].shape)
    print(f[key][()])   # .value was removed in h5py 3.x; use [()] instead

Output:


dset1
/dset1
(20,)
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
You can also create a dataset directly from an existing numpy array; its shape and dtype are then taken from the array:

import h5py
import numpy as np

f = h5py.File("myh5py.hdf5", "w")
a = np.arange(20)
d1 = f.create_dataset("dset1", data=a)
for key in f.keys():
    print(f[key].name)
    print(f[key][()])

Output:


/dset1
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19]
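Datasets also support numpy-style slicing for both reading and writing, which is what makes them convenient for packaging large arrays on disk. A minimal sketch:

import h5py
import numpy as np

with h5py.File("myh5py.hdf5", "w") as f:
    d1 = f.create_dataset("dset1", data=np.arange(20))
    d1[0:5] = -1       # write into a slice of the on-disk dataset
    print(d1[0:10])    # read a slice back as a numpy array: [-1 -1 -1 -1 -1 5 6 7 8 9]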
2. Using HDF5 to package the training and test sets
#============================================================
# This prepares the hdf5 datasets of the DRIVE database
#============================================================

import os
import sys
import h5py
import numpy as np
from PIL import Image

def write_hdf5(arr, outfile):
    with h5py.File(outfile, "w") as f:
        f.create_dataset("image", data=arr, dtype=arr.dtype)

#------------Path of the images --------------------------------------------------------------
#train
original_imgs_train = "./DRIVE/training/images/"
groundTruth_imgs_train = "./DRIVE/training/1st_manual/"
borderMasks_imgs_train = "./DRIVE/training/mask/"
#test
original_imgs_test = "./DRIVE/test/images/"
groundTruth_imgs_test = "./DRIVE/test/1st_manual/"
borderMasks_imgs_test = "./DRIVE/test/mask/"
#---------------------------------------------------------------------------------------------

Nimgs = 20
channels = 3
height = 584
width = 565
dataset_path = "./DRIVE_datasets_training_testing/"

def get_datasets(imgs_dir, groundTruth_dir, borderMasks_dir, train_test="null"):
    imgs = np.empty((Nimgs, height, width, channels))
    groundTruth = np.empty((Nimgs, height, width))
    border_masks = np.empty((Nimgs, height, width))
    for path, subdirs, files in os.walk(imgs_dir):  # list all files, directories in the path
        for i in range(len(files)):
            # original image
            print("original image: " + files[i])
            img = Image.open(imgs_dir + files[i])
            imgs[i] = np.asarray(img)
            # corresponding ground truth
            groundTruth_name = files[i][0:2] + "_manual1.gif"
            print("ground truth name: " + groundTruth_name)
            g_truth = Image.open(groundTruth_dir + groundTruth_name)
            groundTruth[i] = np.asarray(g_truth)
            # corresponding border mask
            if train_test == "train":
                border_masks_name = files[i][0:2] + "_training_mask.gif"
            elif train_test == "test":
                border_masks_name = files[i][0:2] + "_test_mask.gif"
            else:
                sys.exit("specify if train or test!!")
            print("border masks name: " + border_masks_name)
            b_mask = Image.open(borderMasks_dir + border_masks_name)
            border_masks[i] = np.asarray(b_mask)

    print("imgs max: " + str(np.max(imgs)))
    print("imgs min: " + str(np.min(imgs)))
    assert np.max(groundTruth) == 255 and np.max(border_masks) == 255
    assert np.min(groundTruth) == 0 and np.min(border_masks) == 0
    print("ground truth and border masks are correctly within pixel value range 0-255 (black-white)")
    # reshaping for my standard tensors: (Nimgs, channels, height, width)
    imgs = np.transpose(imgs, (0, 3, 1, 2))
    assert imgs.shape == (Nimgs, channels, height, width)
    groundTruth = np.reshape(groundTruth, (Nimgs, 1, height, width))
    border_masks = np.reshape(border_masks, (Nimgs, 1, height, width))
    assert groundTruth.shape == (Nimgs, 1, height, width)
    assert border_masks.shape == (Nimgs, 1, height, width)
    return imgs, groundTruth, border_masks

if not os.path.exists(dataset_path):
    os.makedirs(dataset_path)

#getting the training datasets
imgs_train, groundTruth_train, border_masks_train = get_datasets(original_imgs_train, groundTruth_imgs_train, borderMasks_imgs_train, "train")
print("saving train datasets")
write_hdf5(imgs_train, dataset_path + "DRIVE_dataset_imgs_train.hdf5")
write_hdf5(groundTruth_train, dataset_path + "DRIVE_dataset_groundTruth_train.hdf5")
write_hdf5(border_masks_train, dataset_path + "DRIVE_dataset_borderMasks_train.hdf5")

#getting the testing datasets
imgs_test, groundTruth_test, border_masks_test = get_datasets(original_imgs_test, groundTruth_imgs_test, borderMasks_imgs_test, "test")
print("saving test datasets")
write_hdf5(imgs_test, dataset_path + "DRIVE_dataset_imgs_test.hdf5")
write_hdf5(groundTruth_test, dataset_path + "DRIVE_dataset_groundTruth_test.hdf5")
write_hdf5(border_masks_test, dataset_path + "DRIVE_dataset_borderMasks_test.hdf5")
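For completeness, here is a minimal sketch of reading one of these packaged files back; the load_hdf5 helper name is just illustrative, not part of the original script:

import h5py

def load_hdf5(infile):
    # Open the packed file and return the "image" dataset as a numpy array
    with h5py.File(infile, "r") as f:
        return f["image"][()]

# e.g. reload the packed training images; shape is (Nimgs, channels, height, width)
imgs_train = load_hdf5("./DRIVE_datasets_training_testing/DRIVE_dataset_imgs_train.hdf5")
print(imgs_train.shape)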

Traversing all files under a directory: os.walk(dir)


for parent, dir_names, file_names in os.walk(parent_dir):
    for file_name in file_names:
        print(file_name)

parent: the parent directory currently being walked

dir_names: the names of the subdirectories under parent

file_names: the names of the files under parent
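Since file_names holds bare names, joining each one with parent gives the full path; a small sketch (the parent_dir value is just an illustrative example):

import os

parent_dir = "./DRIVE/training/images/"   # illustrative path
for parent, dir_names, file_names in os.walk(parent_dir):
    for file_name in file_names:
        # os.path.join gives the full path to each file
        print(os.path.join(parent, file_name))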

Source: https://blog.csdn.net/shenziheng1/article/details/80675264
