Trying to train with .jpg files instead of video fails
Closed this issue · 1 comment
Hi,
I have been trying to train the model with images instead of video, and I run into an issue when training on multiple images from multiple folders, where the folder names are the classes.
Here is my code:
def create_manual_data():
    dataset = []
    image_paths = []
    path = './techDB/'
    path_exp = os.path.expanduser(path)
    classes = [path for path in os.listdir(path_exp)
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    nrof_classes = len(classes)
    for i in range(nrof_classes):
        class_name = classes[i]
        facedir = os.path.join(path_exp, class_name)
        dataset.append(ImageClass(class_name, image_paths))
        if os.path.isdir(facedir):
            images = os.listdir(facedir)
            image_paths = [os.path.join(facedir, img) for img in images]
        img = cv2.imread(image_paths[i])
        new_name = class_name;
        f = open('./facerec_128D.txt', 'r');
        data_set = json.loads(f.read());
        person_imgs = {"Left": [], "Right": [], "Center": []};
        person_features = {"Left": [], "Right": [], "Center": []};
        rects, landmarks = face_detect.detect_face(img, 80);  # min face size is set to 80x80
        for (i, rect) in enumerate(rects):
            aligned_frame, pos = aligner.align(160, img, landmarks[i]);
            if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:
                person_imgs[pos].append(aligned_frame)
                cv2.imshow("Captured face", aligned_frame)
        for pos in person_imgs:  # there are some exceptions here, but I'll just leave it as this to keep it simple
            person_features[pos] = [np.mean(extract_feature.get_features(person_imgs[pos]), axis=0).tolist()]
        data_set[new_name] = person_features;
        f = open('./facerec_128D.txt', 'w');
        f.write(json.dumps(data_set))
The error I get when I run python main.py --mode "input" is:
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1322, in _do_call
return fn(*args)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1307, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1409, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Reshape cannot infer the missing input size for an empty tensor unless all specified input sizes are non-zero
[[Node: InceptionResnetV1/Logits/Flatten/flatten/Reshape = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](InceptionResnetV1/Logits/AvgPool_1a_8x8/AvgPool, InceptionResnetV1/Logits/Flatten/flatten/Reshape/shape)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 178, in
main(args);
File "main.py", line 29, in main
create_manual_data();
File "main.py", line 161, in create_manual_data
person_features[pos] = [np.mean(extract_feature.get_features(person_imgs[pos]),axis=0).tolist()]
File "C:\Users\TECHOLUTION\FaceRec\face_feature.py", line 31, in get_features
return self.sess.run(self.embeddings, feed_dict = {self.x : images})
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 900, in run
run_metadata_ptr)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1135, in _run
feed_dict_tensor, options, run_metadata)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1316, in _do_run
run_metadata)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1335, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Reshape cannot infer the missing input size for an empty tensor unless all specified input sizes are non-zero
[[Node: InceptionResnetV1/Logits/Flatten/flatten/Reshape = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](InceptionResnetV1/Logits/AvgPool_1a_8x8/AvgPool, InceptionResnetV1/Logits/Flatten/flatten/Reshape/shape)]]
Caused by op 'InceptionResnetV1/Logits/Flatten/flatten/Reshape', defined at:
File "main.py", line 176, in
extract_feature = FaceFeature(FRGraph)
File "C:\Users\TECHOLUTION\FaceRec\face_feature.py", line 22, in init
resnet.inference(self.x, 0.6, phase_train=False)[0], 1, 1e-10); #some magic numbers that u dont have to care about
File "C:\Users\TECHOLUTION\FaceRec\architecture\inception_resnet_v1.py", line 155, in inference
reuse=reuse)
File "C:\Users\TECHOLUTION\FaceRec\architecture\inception_resnet_v1.py", line 236, in inception_resnet_v1
net = slim.flatten(net)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\contrib\framework\python\ops\arg_scope.py", line 183, in func_with_args
return func(*args, **current_args)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\contrib\layers\python\layers\layers.py", line 1490, in flatten
outputs = core_layers.flatten(inputs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\layers\core.py", line 415, in flatten
return layer.apply(inputs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py", line 828, in apply
return self.__call__(inputs, *args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py", line 717, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\layers\core.py", line 376, in call
outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 7323, in reshape
"Reshape", tensor=tensor, shape=shape, name=name)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3392, in create_op
op_def=op_def)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1718, in init
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Reshape cannot infer the missing input size for an empty tensor unless all specified input sizes are non-zero
[[Node: InceptionResnetV1/Logits/Flatten/flatten/Reshape = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](InceptionResnetV1/Logits/AvgPool_1a_8x8/AvgPool, InceptionResnetV1/Logits/Flatten/flatten/Reshape/shape)]]
Any suggestion on how to train the model with images instead of video would be much appreciated.
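
For what it's worth, the "empty tensor" in the Reshape error usually means get_features was fed an empty list: person_imgs[pos] stays empty whenever no face of that pose is detected, and the loop above only ever reads one image per class (image_paths[i]). A minimal, untested sketch of a per-class loop that reads every image in the folder and skips empty pose buckets might look like this (face_detect, aligner and extract_feature are assumed to be the same objects main.py already builds):

import os
import json
import cv2
import numpy as np

def create_manual_data_from_folders(path='./techDB/'):
    # Load the existing feature database (same file the video flow writes to).
    with open('./facerec_128D.txt', 'r') as f:
        data_set = json.loads(f.read())

    classes = sorted(d for d in os.listdir(path)
                     if os.path.isdir(os.path.join(path, d)))

    for class_name in classes:
        facedir = os.path.join(path, class_name)
        person_imgs = {"Left": [], "Right": [], "Center": []}
        person_features = {"Left": [], "Right": [], "Center": []}

        # Read every image in the class folder, not just one per class.
        for fname in os.listdir(facedir):
            img = cv2.imread(os.path.join(facedir, fname))
            if img is None:
                continue  # skip files OpenCV cannot read
            rects, landmarks = face_detect.detect_face(img, 80)  # min face size 80x80
            for i, rect in enumerate(rects):
                aligned_frame, pos = aligner.align(160, img, landmarks[i])
                if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:
                    person_imgs[pos].append(aligned_frame)

        # Guard against poses with no detections; feeding an empty list to
        # get_features is what produces the "empty tensor" Reshape error.
        for pos in person_imgs:
            if person_imgs[pos]:
                person_features[pos] = [np.mean(
                    extract_feature.get_features(person_imgs[pos]), axis=0).tolist()]

        data_set[class_name] = person_features

    with open('./facerec_128D.txt', 'w') as f:
        f.write(json.dumps(data_set))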
vs = cv2.VideoCapture("images/WIN_20190325_15_03_45_Pro.mp4");

I am getting this error in face_feature:

def get_features(self, input_imgs):
    images = load_data_list(input_imgs, 160)
    feed_dict = {self.x: images, self.phase_train_placeholder: False}
    return self.sess.run(self.embeddings, feed_dict=feed_dict)

Input to reshape is a tensor with 3 values, but the requested shape has 32
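
That second error suggests the array reaching the network does not have the expected (num_faces, 160, 160, 3) batch shape; a tensor with only 3 values points at something like a single pixel or a stray 3-element list being fed instead of stacked images. A quick diagnostic, assuming the same method shown above, numpy imported as np, and whatever load_data_list helper that version of face_feature.py uses, would be to check the shape right before sess.run:

def get_features(self, input_imgs):
    # Stack whatever load_data_list returns into one array so its shape is visible.
    images = np.asarray(load_data_list(input_imgs, 160))
    # The embedding graph expects a 4-D batch: (num_faces, 160, 160, 3).
    print("feeding batch of shape", images.shape)
    assert images.ndim == 4 and images.shape[1:] == (160, 160, 3), \
        "unexpected input shape: %s" % (images.shape,)
    feed_dict = {self.x: images, self.phase_train_placeholder: False}
    return self.sess.run(self.embeddings, feed_dict=feed_dict)

If the printed shape is not 4-D, the problem is in how the frames are collected or loaded before they ever reach the model.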