# coding=utf-8
"""Sanity-check script: read VOC2012 segmentation TFRecords via a tf.data pipeline."""
import os

# These environment variables must be set BEFORE importing tensorflow:
# TF reads them during import/initialization, so setting them afterwards
# (as the original code did) has no effect on logging.
os.environ["CUDA_VISIBLE_DEVICES"] = "9"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # log verbosity: 3 = show errors only

import tensorflow as tf
def dataset_input_fn():
    """Build a one-shot iterator over the VOC2012 training TFRecord file.

    The pipeline decodes each serialized tf.Example into a (image, label)
    pair, shuffles with a 10000-element buffer, batches by 32 and repeats
    for 10 epochs.

    Returns:
        A `tf.data` one-shot iterator; `iterator.get_next()` yields
        `(image, label)` batches where image is uint8 shaped
        [batch, 500, 500, 3] and label is int32 shaped [batch, 500, 500, 1].
    """
    filenames = '/home/caodai/VOC2012/voc_train.tfrecords'
    dataset = tf.data.TFRecordDataset(filenames)

    def parser(record):
        """Decode a single serialized tf.Example into (image, label) tensors."""
        # NOTE(review): the key spelling 'img_orginal' (sic) must match the
        # record writer; it is intentionally left as-is.
        keys_to_features = {
            'img_orginal': tf.FixedLenFeature([], tf.string),
            'img_segmentation': tf.FixedLenFeature([], tf.string),
        }
        parsed = tf.parse_single_example(record, keys_to_features)

        image = tf.decode_raw(parsed["img_orginal"], tf.uint8)
        image = tf.reshape(image, [500, 500, 3])
        label = tf.decode_raw(parsed["img_segmentation"], tf.int32)
        label = tf.reshape(label, [500, 500, 1])
        return image, label

    dataset = dataset.map(parser)
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.batch(32)
    dataset = dataset.repeat(10)
    return dataset.make_one_shot_iterator()
iterator = dataset_input_fn()
# Symbolic graph tensors — each sess.run() advances the iterator by one batch.
a1, b1 = iterator.get_next()

# Fixes vs. original: print-statement syntax (Python 2 only) replaced by the
# print() call form, and the Session is closed via a context manager instead
# of being leaked.
with tf.Session() as sess:
    # Printing the tensor object shows graph metadata, not values.
    print(a1)
    # The two runs print DIFFERENT values: every sess.run() pulls the next
    # shuffled batch from the one-shot iterator.
    print(sess.run(a1[0, 0:10, 0:10, 0]))
    print(sess.run(a1[0, 0:10, 0:10, 0]))