I am trying to use Aleju's imgaug with the TFOD API. I noticed that you cannot iterate through Tensors in graph mode. I looked for a solution and tried many suggestions, but none of them worked for my case. Do you know of any workaround?

import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from tensorflow.python.framework.ops import EagerTensor
import tensorflow.compat.v1 as tf
import numpy as np

augseq = iaa.Sequential([
    # augmentation options
], random_order=True)

@tf.function
def augment(image, boxes):
    image_np = image.numpy().astype(np.uint8) if type(image) == EagerTensor else image
    boxes_np = boxes.numpy() if type(boxes) == EagerTensor else boxes
    height, width, _ = image_np.shape  # numpy images are (height, width, channels)
    bbs = []
    for i in range(len(boxes_np)):
        box = boxes_np[i]
        ymin, xmin, ymax, xmax = box.numpy()  # this is the line that raises AttributeError in graph mode
        bbs.append(BoundingBox(
            x1=xmin*width, y1=ymin*height,
            x2=xmax*width, y2=ymax*height,))
    bbs = BoundingBoxesOnImage(bbs, shape=image_np.shape)
    image_aug, bbs_aug = augseq(image=image_np, bounding_boxes=bbs) # float np.ndarray
    bbs_aug = bbs_aug.remove_out_of_image().clip_out_of_image()
    
    boxes_aug = []
    for bb in bbs_aug:
        boxes_aug.append([bb.y1/height, bb.x1/width, bb.y2/height, bb.x2/width])
    boxes_aug = np.array(boxes_aug)
    
    return image_aug, boxes_aug

Stack Trace:

Traceback (most recent call last):
  File "/content/models/research/object_detection/model_main_tf2.py", line 115, in <module>
    tf.compat.v1.app.run()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/platform/app.py", line 40, in run
    _run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
  File "/usr/local/lib/python3.7/dist-packages/absl/app.py", line 303, in run
    _run_main(main, args)
  File "/usr/local/lib/python3.7/dist-packages/absl/app.py", line 251, in _run_main
    sys.exit(main(argv))
  File "/content/models/research/object_detection/model_main_tf2.py", line 112, in main
    record_summaries=FLAGS.record_summaries)
  File "/usr/local/lib/python3.7/dist-packages/object_detection/model_lib_v2.py", line 558, in train_loop
    train_dataset_fn)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/deprecation.py", line 348, in new_func
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 1199, in experimental_distribute_datasets_from_function
    return self.distribute_datasets_from_function(dataset_fn, options)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 1191, in distribute_datasets_from_function
    dataset_fn, options)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/tpu_strategy.py", line 979, in _distribute_datasets_from_function
    options=options)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 181, in get_distributed_datasets_from_function
    build=build,
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 1618, in __init__
    self.build()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 1639, in build
    self._input_contexts, self._input_workers, self._dataset_fn))
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 2350, in _create_datasets_from_function_with_input_context
    dataset = dataset_fn(ctx)
  File "/usr/local/lib/python3.7/dist-packages/object_detection/model_lib_v2.py", line 553, in train_dataset_fn
    input_context=input_context)
  File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 906, in train_input
    reduce_to_frame_fn=reduce_to_frame_fn)
  File "/usr/local/lib/python3.7/dist-packages/object_detection/builders/dataset_builder.py", line 258, in build
    batch_size, input_reader_config)
  File "/usr/local/lib/python3.7/dist-packages/object_detection/builders/dataset_builder.py", line 237, in dataset_map_fn
    fn_to_map, num_parallel_calls=num_parallel_calls)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/deprecation.py", line 348, in new_func
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 3886, in map_with_legacy_function
    use_legacy_function=True))
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 5505, in __init__
    use_legacy_function=use_legacy_function)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 4540, in __init__
    self._function.add_to_graph(ops.get_default_graph())
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 544, in add_to_graph
    self._create_definition_if_needed()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 380, in _create_definition_if_needed
    self._create_definition_if_needed_impl()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 407, in _create_definition_if_needed_impl
    capture_resource_var_by_value=self._capture_resource_var_by_value)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 970, in func_graph_from_py_func
    outputs = func(*func_graph.inputs)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 4458, in wrapped_fn
    ret = wrapper_helper(*args)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 4440, in wrapper_helper
    ret = autograph.tf_convert(self._func, ag_ctx)(*nested_args)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/impl/api.py", line 699, in wrapper
    raise e.ag_error_metadata.to_exception(e)
AttributeError: in user code:

    File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 886, in transform_and_pad_input_data_fn  *
        tensor_dict = pad_input_data_to_static_shapes(
    File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 272, in transform_input_data  *
        out_tensor_dict = data_augmentation_fn(out_tensor_dict)
    File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 623, in augment_input_data  *
        tensor_dict = preprocessor.preprocess(
    File "/usr/local/lib/python3.7/dist-packages/object_detection/core/preprocessor.py", line 4812, in preprocess  *
        results = func(*args, **params)
    File "/usr/local/lib/python3.7/dist-packages/object_detection/core/preprocessor.py", line 4422, in _adjust_imgaug  *
        adjusted_image, adjusted_boxes = tf.cast(imgaug_utils.augment(image,boxes), tf.float32)
    File "/usr/local/lib/python3.7/dist-packages/object_detection/core/imgaug_utils.py", line 24, in augment  *
        ymin, xmin, ymax, xmax = box.numpy()

    AttributeError: 'Tensor' object has no attribute 'numpy'

Here is what I tried that did not work:

  1. Enabling eager execution (it is the default in TF 2.x).
  2. Decorating / not decorating the function with @tf.function.
  3. Creating a TF session and trying eval() or run(), which fails with:
  • InvalidArgumentError: You must feed a value for placeholder tensor 'while/Placeholder' with dtype int32
  4. Running on both TPU and CPU.
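
For completeness, the kind of workaround I am looking for would let the numpy/imgaug code above run unchanged from inside the graph-mode input pipeline. Below is a rough, untested sketch of what I have in mind using tf.numpy_function; augment_np is just a placeholder for a pure-numpy version of augment() above.

import numpy as np
import tensorflow as tf

def augment_np(image, boxes):
    # placeholder: a pure-numpy version of augment() above (imgaug call, box conversion, ...)
    return image.astype(np.float32), boxes.astype(np.float32)

def augment_graph_safe(image, boxes):
    # tf.numpy_function runs the wrapped Python code eagerly at runtime,
    # so the inputs arrive as plain numpy arrays and no .numpy() calls are needed
    image_aug, boxes_aug = tf.numpy_function(
        func=augment_np, inp=[image, boxes], Tout=[tf.float32, tf.float32])
    # static shape information is lost across the numpy_function boundary; restore what is known
    image_aug.set_shape([None, None, 3])
    boxes_aug.set_shape([None, 4])
    return image_aug, boxes_aug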

1 Answer

There is now tf.range(), which you can loop over inside graph code.

A brief example


import numpy as np
import tensorflow as tf

# Here is a 3-D tensor.
x = tf.convert_to_tensor(np.array([[[1, 1], [1, 1], [1, 1]],
                                   [[2, 2], [2, 2], [2, 2]]]))

# Now loop over its slices along the first dimension.
length = x.shape[0]
for n in tf.range(length):
    do_some_naugty_things(x[n])  # placeholder for whatever per-slice work you need
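
A sketch of how such a loop might look for the normalized boxes in the question, assuming boxes is a [N, 4] float tensor of [ymin, xmin, ymax, xmax] and height/width are plain Python numbers; a TensorArray collects the per-row results so the loop also works under @tf.function:

import tensorflow as tf

@tf.function
def scale_boxes(boxes, height, width):
    # boxes: [N, 4] float32 tensor of normalized [ymin, xmin, ymax, xmax]
    # height, width: Python numbers (image size in pixels)
    out = tf.TensorArray(tf.float32, size=tf.shape(boxes)[0])
    for i in tf.range(tf.shape(boxes)[0]):
        ymin, xmin, ymax, xmax = tf.unstack(boxes[i])
        out = out.write(i, tf.stack([ymin * height, xmin * width,
                                     ymax * height, xmax * width]))
    return out.stack()  # [N, 4] absolute pixel coordinates

Note that the imgaug call itself still needs real numpy arrays, so a loop like this only covers the pure-tensor parts of the function.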
