CAMMA-public / TF-Cholec80

Library packaging the Cholec80 dataset for easy handling with Tensorflow.

DataLossError while executing TF-Cholec80

mhymavathiprashanthi opened this issue · comments

I am trying to run the code on my PC, starting with TF-Cholec80, and I am getting an error; please suggest what can be done. Following suggestions from Google and Stack Overflow, I have already tried restarting TensorFlow, Anaconda, and my system. I am using Python 3.8.5; could the error be caused by that?

Code executed:
import matplotlib.pyplot as plt
from tf_cholec80.dataset import make_cholec80
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

ds = make_cholec80(
    12,  # batch size
    config_path="E:/IIITG/ELS/Technical/Code/TF-Cholec80-master/TF-Cholec80-master/tf_cholec80/configs/config.json",  # path to dataset config file
    video_ids=[0, 3, 78],  # ids to pick
    mode="FRAME"  # operation mode
)
ds.element_spec

iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
return_dict = iterator.get_next()

with tf.compat.v1.Session() as sess:
    return_dict_np = sess.run(return_dict)

I am getting the error below. Is it because I am using Python 3.8.5?


DataLossError Traceback (most recent call last)
~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1374 try:
-> 1375 return fn(*args)
1376 except errors.OpError as e:

~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1358 self._extend_graph()
-> 1359 return self._call_tf_sessionrun(options, feed_dict, fetch_list,
1360 target_list, run_metadata)

~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1450 run_metadata):
-> 1451 return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
1452 fetch_list, target_list,

DataLossError: corrupted record at 0
[[{{node IteratorGetNext}}]]

During handling of the above exception, another exception occurred:

DataLossError Traceback (most recent call last)
in
1 with tf.compat.v1.Session() as sess:
----> 2 return_dict_np = sess.run(return_dict)

~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
965
966 try:
--> 967 result = self._run(None, fetches, feed_dict, options_ptr,
968 run_metadata_ptr)
969 if run_metadata:

~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1188 # or if the call is a partial run that specifies feeds.
1189 if final_fetches or final_targets or (handle and feed_dict_tensor):
-> 1190 results = self._do_run(handle, final_targets, final_fetches,
1191 feed_dict_tensor, options, run_metadata)
1192 else:

~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1366
1367 if handle is None:
-> 1368 return self._do_call(_run_fn, feeds, fetches, targets, options,
1369 run_metadata)
1370 else:

~\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1392 '\nsession_config.graph_options.rewrite_options.'
1393 'disable_meta_optimizer = True')
-> 1394 raise type(e)(node_def, op, message)
1395
1396 def _extend_graph(self):

DataLossError: corrupted record at 0
[[node IteratorGetNext (defined at :2) ]]

Errors may have originated from an input operation.
Input Source operations connected to node IteratorGetNext:
OneShotIterator (defined at :1)

Original stack trace for 'IteratorGetNext':
File "C:\Users\mhpra\anaconda3\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\mhpra\anaconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel_launcher.py", line 16, in
app.launch_new_instance()
File "C:\Users\mhpra\anaconda3\lib\site-packages\traitlets\config\application.py", line 845, in launch_instance
app.start()
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 612, in start
self.io_loop.start()
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 149, in start
self.asyncio_loop.run_forever()
File "C:\Users\mhpra\anaconda3\lib\asyncio\base_events.py", line 570, in run_forever
self._run_once()
File "C:\Users\mhpra\anaconda3\lib\asyncio\base_events.py", line 1859, in _run_once
handle._run()
File "C:\Users\mhpra\anaconda3\lib\asyncio\events.py", line 81, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\ioloop.py", line 690, in
lambda f: self._run_callback(functools.partial(callback, future))
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\ioloop.py", line 743, in _run_callback
ret = callback()
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\gen.py", line 787, in inner
self.run()
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\gen.py", line 748, in run
yielded = self.gen.send(value)
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 365, in process_one
yield gen.maybe_future(dispatch(*args))
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\gen.py", line 209, in wrapper
yielded = next(result)
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 268, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\gen.py", line 209, in wrapper
yielded = next(result)
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 543, in execute_request
self.do_execute(
File "C:\Users\mhpra\anaconda3\lib\site-packages\tornado\gen.py", line 209, in wrapper
yielded = next(result)
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 306, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\mhpra\anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\Users\mhpra\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2877, in run_cell
result = self._run_cell(
File "C:\Users\mhpra\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2923, in _run_cell
return runner(coro)
File "C:\Users\mhpra\anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 68, in pseudo_sync_runner
coro.send(None)
File "C:\Users\mhpra\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3146, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\Users\mhpra\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3338, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "C:\Users\mhpra\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3418, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "", line 2, in
return_dict = iterator.get_next()
File "C:\Users\mhpra\anaconda3\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py", line 416, in get_next
flat_ret = gen_dataset_ops.iterator_get_next(
File "C:\Users\mhpra\anaconda3\lib\site-packages\tensorflow\python\ops\gen_dataset_ops.py", line 2747, in iterator_get_next
_, _, _op, _outputs = _op_def_library._apply_op_helper(
File "C:\Users\mhpra\anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 748, in _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
File "C:\Users\mhpra\anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3557, in _create_op_internal
ret = Operation(
File "C:\Users\mhpra\anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2045, in init
self._traceback = tf_stack.extract_stack_for_node(self._c_op)
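
For reference: "DataLossError: corrupted record at 0" from an input pipeline almost always means that one of the TFRecord files being read is corrupted or incomplete (for example, from an interrupted download or extraction), not that Python 3.8.5 is the problem. Below is a minimal diagnostic sketch; the directory and the .tfrecord extension in the glob pattern are assumptions, so point it at wherever your config.json says the generated record files live, and run it in a fresh kernel without tf.disable_v2_behavior() so eager iteration works.

import glob

import tensorflow as tf  # fresh session, eager execution enabled (TF2 default)

# Hypothetical location and extension: adjust to the directory and file names
# that your config.json references for the generated record files.
record_files = sorted(glob.glob(
    "E:/IIITG/ELS/Technical/Code/TF-Cholec80-master/TF-Cholec80-master/records/*.tfrecord"
))

for path in record_files:
    try:
        # Reading every record raises the same DataLossError on the first
        # corrupted entry, which isolates the broken file(s).
        n = sum(1 for _ in tf.data.TFRecordDataset(path))
        print("OK: {} ({} records)".format(path, n))
    except tf.errors.DataLossError as err:
        print("CORRUPTED: {} -> {}".format(path, err))

Any file reported as corrupted typically just needs to be re-downloaded or regenerated with the repository's data preparation step before make_cholec80 is called again.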