ScalaConsultants/Aspect-Based-Sentiment-Analysis

tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,529] = 529 is not in [0, 512) [Op:ResourceGather]

chetanniradwar opened this issue · 0 comments

I searched on the internet and found that this issue is due to the length of the 'text' I am passing to the nlp function below.

import aspect_based_sentiment_analysis as absa

name = 'absa/classifier-lapt-0.2'
recognizer = absa.aux_models.BasicPatternRecognizer()
nlp = absa.load(name, pattern_recognizer=recognizer)

text = " "  # some multiline long review text
completed_task = nlp(text, aspects=['camera', 'design'])
camera, design = completed_task.examples

print(camera.sentiment)

When I reduce the size of the text, it works fine. Is there any other solution to this?
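For now I shorten the review before calling nlp. This is only a rough sketch of what I do, assuming the pipeline uses a standard BERT tokenizer from transformers with a 512-position limit (I have not checked which tokenizer the 'absa/classifier-lapt-0.2' model actually loads, so the tokenizer name and the 500-token margin here are guesses):

from transformers import BertTokenizer

# Assumed tokenizer; the real one used by the classifier may differ.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Keep some headroom below 512 for the aspect word and the special
# [CLS]/[SEP] tokens the pipeline adds around the text pair.
max_text_tokens = 500

# 'text' and 'nlp' continue from the snippet above.
tokens = tokenizer.tokenize(text)
if len(tokens) > max_text_tokens:
    text = tokenizer.convert_tokens_to_string(tokens[:max_text_tokens])

completed_task = nlp(text, aspects=['camera', 'design'])

This avoids the error, but it throws away the end of long reviews, so I would prefer a way to handle the full text.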

Traceback (most recent call last):
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-28-07dbd638851c> in <module>
     17 aspect='camera'
     18 camera=0
---> 19 completed_task = nlp(text, aspects=['camera','design'])
     20 camera,design = completed_task.examples

~\AppData\Local\Programs\Python\Python37\lib\site-packages\aspect_based_sentiment_analysis\pipelines.py in __call__(self, text, aspects)
    206     def __call__(self, text: str, aspects: List[str]) -> CompletedTask:
    207         task = self.preprocess(text, aspects)
--> 208         predictions = self.transform(task.examples)
    209         completed_task = self.postprocess(task, predictions)
    210         return completed_task

~\AppData\Local\Programs\Python\Python37\lib\site-packages\aspect_based_sentiment_analysis\pipelines.py in transform(self, examples)
    222         tokenized_examples = self.tokenize(examples)
    223         input_batch = self.encode(tokenized_examples)
--> 224         output_batch = self.predict(input_batch)
    225         predictions = self.review(tokenized_examples, output_batch)
    226         return predictions

~\AppData\Local\Programs\Python\Python37\lib\site-packages\aspect_based_sentiment_analysis\pipelines.py in predict(self, input_batch)
    252                 token_ids=input_batch.token_ids,
    253                 attention_mask=input_batch.attention_mask,
--> 254                 token_type_ids=input_batch.token_type_ids
    255             )
    256             # We assume that our predictions are correct. This is

~\AppData\Local\Programs\Python\Python37\lib\site-packages\aspect_based_sentiment_analysis\models.py in call(self, token_ids, attention_mask, token_type_ids, training, **bert_kwargs)
    144             token_type_ids=token_type_ids,
    145             training=training,
--> 146             **bert_kwargs
    147         )
    148         sequence_output, pooled_output, hidden_states, attentions = outputs

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
    966           with base_layer_utils.autocast_context_manager(
    967               self._compute_dtype):
--> 968             outputs = self.call(cast_inputs, *args, **kwargs)
    969           self._handle_activity_regularization(inputs, outputs)
    970           self._set_mask_metadata(inputs, outputs, input_masks)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\transformers\modeling_tf_bert.py in call(self, inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, training)
    564             # head_mask = tf.constant([0] * self.num_hidden_layers)
    565 
--> 566         embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
    567         encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training)
    568 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
    966           with base_layer_utils.autocast_context_manager(
    967               self._compute_dtype):
--> 968             outputs = self.call(cast_inputs, *args, **kwargs)
    969           self._handle_activity_regularization(inputs, outputs)
    970           self._set_mask_metadata(inputs, outputs, input_masks)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\transformers\modeling_tf_bert.py in call(self, inputs, mode, training)
    146         """
    147         if mode == "embedding":
--> 148             return self._embedding(inputs, training=training)
    149         elif mode == "linear":
    150             return self._linear(inputs)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\transformers\modeling_tf_bert.py in _embedding(self, inputs, training)
    169         if inputs_embeds is None:
    170             inputs_embeds = tf.gather(self.word_embeddings, input_ids)
--> 171         position_embeddings = self.position_embeddings(position_ids)
    172         token_type_embeddings = self.token_type_embeddings(token_type_ids)
    173 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
    966           with base_layer_utils.autocast_context_manager(
    967               self._compute_dtype):
--> 968             outputs = self.call(cast_inputs, *args, **kwargs)
    969           self._handle_activity_regularization(inputs, outputs)
    970           self._set_mask_metadata(inputs, outputs, input_masks)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\layers\embeddings.py in call(self, inputs)
    182     if dtype != 'int32' and dtype != 'int64':
    183       inputs = math_ops.cast(inputs, 'int32')
--> 184     out = embedding_ops.embedding_lookup(self.embeddings, inputs)
    185     return out
    186 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\ops\embedding_ops.py in embedding_lookup(params, ids, partition_strategy, name, validate_indices, max_norm)
    324       name=name,
    325       max_norm=max_norm,
--> 326       transform_fn=None)
    327 
    328 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\ops\embedding_ops.py in _embedding_lookup_and_transform(params, ids, partition_strategy, name, max_norm, transform_fn)
    135       with ops.colocate_with(params[0]):
    136         result = _clip(
--> 137             array_ops.gather(params[0], ids, name=name), ids, max_norm)
    138         if transform_fn:
    139           result = transform_fn(result)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\util\dispatch.py in wrapper(*args, **kwargs)
    178     """Call target, and fall back on dispatchers if there is a TypeError."""
    179     try:
--> 180       return target(*args, **kwargs)
    181     except (TypeError, ValueError):
    182       # Note: convert_to_eager_tensor currently raises a ValueError, not a

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\ops\array_ops.py in gather(***failed resolving arguments***)
   4520     # TODO(apassos) find a less bad way of detecting resource variables
   4521     # without introducing a circular dependency.
-> 4522     return params.sparse_read(indices, name=name)
   4523   except AttributeError:
   4524     return gen_array_ops.gather_v2(params, indices, axis, name=name)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py in sparse_read(self, indices, name)
    674       variable_accessed(self)
    675       value = gen_resource_variable_ops.resource_gather(
--> 676           self._handle, indices, dtype=self._dtype, name=name)
    677 
    678       if self._dtype == dtypes.variant:

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\ops\gen_resource_variable_ops.py in resource_gather(resource, indices, dtype, batch_dims, validate_indices, name)
    554         pass  # Add nodes to the TensorFlow graph.
    555     except _core._NotOkStatusException as e:
--> 556       _ops.raise_from_not_ok_status(e, name)
    557   # Add nodes to the TensorFlow graph.
    558   dtype = _execute.make_type(dtype, "dtype")

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\framework\ops.py in raise_from_not_ok_status(e, name)
   6651   message = e.message + (" name: " + name if name is not None else "")
   6652   # pylint: disable=protected-access
-> 6653   six.raise_from(core._status_to_exception(e.code, message), None)
   6654   # pylint: enable=protected-access
   6655 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\six.py in raise_from(value, from_value)

InvalidArgumentError: indices[0,529] = 529 is not in [0, 512) [Op:ResourceGather]