Get difference of foreign key attribute with Django

I'm trying to get a queryset where a Task has two Issuances which have been created within 10 seconds of one another.
Models are as follows:
from django.db import models
from django.utils import timezone

class Task(models.Model):
    # stuff

class Issuance(models.Model):
    task = models.ForeignKey(Task, blank=True, null=True, on_delete=models.SET_NULL)
    created = models.DateTimeField(default=timezone.now)
What I've got so far:
qs = (
    Task.objects
    .annotate(count=Count('issuance'))
    .filter(count__gt=1, count__lte=2)
    .annotate(time_difference=F('issuance__created'))  # Need to fix this
    .annotate(
        dupe=Case(
            When(
                time_difference__lt=10,  # Less than 10 seconds
                then=Value(1),
            ),
            default=Value(0),
            output=BooleanField(),
        )
    )
)
I think I'm pretty close, but I need some way to calculate the time difference between the creation dates of the two issuances for any one Task. I also need to check that the time delta is less than 10 seconds; I'm not sure the above will work.
Can anyone help, please?
EDIT: Added output of query
TypeError Traceback (most recent call last)
<ipython-input-47-e0e60776551e> in <module>()
11 ),
12 default=Value(0),
---> 13 output=BooleanField(),
14 )
15 )
/usr/local/lib/python3.6/site-packages/django/db/models/query.py in annotate(self, *args, **kwargs)
912 raise ValueError("The annotation '%s' conflicts with a field on "
913 "the model." % alias)
--> 914 clone.query.add_annotation(annotation, alias, is_summary=False)
915
916 for alias, annotation in clone.query.annotations.items():
/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py in add_annotation(self, annotation, alias, is_summary)
969 """
970 annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
--> 971 summarize=is_summary)
972 self.append_annotation_mask([alias])
973 self.annotations[alias] = annotation
/usr/local/lib/python3.6/site-packages/django/db/models/expressions.py in resolve_expression(self, query, allow_joins, reuse, summarize, for_save)
827 c.is_summary = summarize
828 for pos, case in enumerate(c.cases):
--> 829 c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
830 c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
831 return c
/usr/local/lib/python3.6/site-packages/django/db/models/expressions.py in resolve_expression(self, query, allow_joins, reuse, summarize, for_save)
760 c.is_summary = summarize
761 if hasattr(c.condition, 'resolve_expression'):
--> 762 c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
763 c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
764 return c
/usr/local/lib/python3.6/site-packages/django/db/models/query_utils.py in resolve_expression(self, query, allow_joins, reuse, summarize, for_save)
79 # We must promote any new joins to left outer joins so that when Q is
80 # used as an expression, rows aren't filtered due to joins.
---> 81 clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
82 query.promote_joins(joins)
83 return clause
/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py in _add_q(self, q_object, used_aliases, branch_negated, current_negated, allow_joins, split_subq)
1251 child, can_reuse=used_aliases, branch_negated=branch_negated,
1252 current_negated=current_negated, connector=connector,
-> 1253 allow_joins=allow_joins, split_subq=split_subq,
1254 )
1255 joinpromoter.add_votes(needed_inner)
/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py in build_filter(self, filter_expr, branch_negated, current_negated, can_reuse, connector, allow_joins, split_subq)
1141 clause = self.where_class()
1142 if reffed_expression:
-> 1143 condition = self.build_lookup(lookups, reffed_expression, value)
1144 clause.add(condition, AND)
1145 return clause, []
/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py in build_lookup(self, lookups, lhs, rhs)
1081 lhs = self.try_transform(lhs, name, lookups)
1082 final_lookup = lhs.get_lookup('exact')
-> 1083 return final_lookup(lhs, rhs)
1084 lhs = self.try_transform(lhs, name, lookups)
1085 lookups = lookups[1:]
/usr/local/lib/python3.6/site-packages/django/db/models/lookups.py in __init__(self, lhs, rhs)
17 def __init__(self, lhs, rhs):
18 self.lhs, self.rhs = lhs, rhs
---> 19 self.rhs = self.get_prep_lookup()
20 if hasattr(self.lhs, 'get_bilateral_transforms'):
21 bilateral_transforms = self.lhs.get_bilateral_transforms()
/usr/local/lib/python3.6/site-packages/django/db/models/lookups.py in get_prep_lookup(self)
57 return self.rhs._prepare(self.lhs.output_field)
58 if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'):
---> 59 return self.lhs.output_field.get_prep_value(self.rhs)
60 return self.rhs
61
/usr/local/lib/python3.6/site-packages/django/db/models/fields/__init__.py in get_prep_value(self, value)
1415
1416 def get_prep_value(self, value):
-> 1417 value = super(DateTimeField, self).get_prep_value(value)
1418 value = self.to_python(value)
1419 if value is not None and settings.USE_TZ and timezone.is_naive(value):
/usr/local/lib/python3.6/site-packages/django/db/models/fields/__init__.py in get_prep_value(self, value)
1273 def get_prep_value(self, value):
1274 value = super(DateField, self).get_prep_value(value)
-> 1275 return self.to_python(value)
1276
1277 def get_db_prep_value(self, value, connection, prepared=False):
/usr/local/lib/python3.6/site-packages/django/db/models/fields/__init__.py in to_python(self, value)
1376
1377 try:
-> 1378 parsed = parse_datetime(value)
1379 if parsed is not None:
1380 return parsed
/usr/local/lib/python3.6/site-packages/django/utils/dateparse.py in parse_datetime(value)
91 Returns None if the input isn't well formatted.
92 """
---> 93 match = datetime_re.match(value)
94 if match:
95 kw = match.groupdict()
TypeError: expected string or bytes-like object
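The TypeError at the bottom comes from time_difference__lt=10: the annotation is just F('issuance__created'), a DateTimeField, so Django tries to parse the integer 10 as a datetime. One possible approach is to aggregate the earliest and latest issuance per task and compare their difference as a duration. A minimal sketch (assuming the backend supports datetime subtraction via ExpressionWrapper; the first_created/last_created annotation names are just for illustration):

from datetime import timedelta

from django.db.models import Count, DurationField, ExpressionWrapper, F, Max, Min

qs = (
    Task.objects
    .annotate(count=Count('issuance'))
    .filter(count=2)  # exactly two issuances
    .annotate(
        first_created=Min('issuance__created'),
        last_created=Max('issuance__created'),
    )
    .annotate(
        time_difference=ExpressionWrapper(
            F('last_created') - F('first_created'),
            output_field=DurationField(),
        )
    )
    # Compare the duration against a timedelta, not a bare integer
    .filter(time_difference__lt=timedelta(seconds=10))
)

With the two timestamps collapsed into one row per task, the Case/When flag is no longer needed; if it is kept, note that Case takes output_field, not output.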

Related

Django queryset error when using len(qs): TypeError: argument must be int or float

I don't know what happened with my db, but now I can't call len() on my queryset.
I can build a queryset with a lot of objects with qs = SignalSma.objects.all(),
but somehow I can't use len(qs) on that qs or loop over it.
I get this error if I try to do so.
In [9]: len(qs)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[9], line 1
----> 1 len(qs)
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\query.py:262, in QuerySet.__len__(self)
261 def __len__(self):
--> 262 self._fetch_all()
263 return len(self._result_cache)
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\query.py:1324, in QuerySet._fetch_all(self)
1322 def _fetch_all(self):
1323 if self._result_cache is None:
-> 1324 self._result_cache = list(self._iterable_class(self))
1325 if self._prefetch_related_lookups and not self._prefetch_done:
1326 self._prefetch_related_objects()
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\query.py:68, in ModelIterable.__iter__(self)
59 related_populators = get_related_populators(klass_info, select, db)
60 known_related_objects = [
61 (field, related_objs, operator.attrgetter(*[
62 field.attname
(...)
66 ])) for field, related_objs in queryset._known_related_objects.items()
67 ]
---> 68 for row in compiler.results_iter(results):
69 obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
70 for rel_populator in related_populators:
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\sql\compiler.py:1122, in SQLCompiler.apply_converters(self, rows, converters)
1120 value = row[pos]
1121 for converter in convs:
-> 1122 value = converter(value, expression, connection)
1123 row[pos] = value
1124 yield row
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\backends\sqlite3\operations.py:313, in DatabaseOperations.get_decimalfield_converter.<locals>.converter(value, expression, connection)
311 def converter(value, expression, connection):
312 if value is not None:
--> 313 return create_decimal(value).quantize(quantize_value, context=expression.output_field.context)
TypeError: argument must be int or float
Any idea what is happening, and how can I fix this?
QuerySet objects have their own counting method. Use it:
qs = SignalSma.objects.all()
qs.count() # returns number of objects inside the queryset
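For context on why this helps here: .count() runs a single SELECT COUNT(*), so no rows pass through the backend's per-value converters, which is exactly where the TypeError above is raised; len(qs) and iteration, by contrast, fetch and convert every row. A small illustration:

qs = SignalSma.objects.all()

# Executes SELECT COUNT(*) on the database; no DecimalField values are
# fetched, so the SQLite decimal converter never runs.
print(qs.count())

# These materialise the rows and run the converters, so they hit the
# TypeError from the traceback above:
# len(qs)
# for obj in qs: ...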

AttributeError: 'float' object has no attribute 'quantize' using django-firebird

I'm using:
Django 2.2.24,
django-firebird 2.2a1,
fdb 2.02,
my model:
class MyModel(models.Model):
    ...
    total = models.DecimalField(max_digits=10, decimal_places=2, null=True)
    ...
When I run a simple query:
ml = MyModel.objects.values('id', 'total').last()
I got this error:
AttributeError: 'float' object has no attribute 'quantize'
I guess the issue is with converters from firebird/operations.py
Full traceback:
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/django/db/models/query.py in last(self)
656 def last(self):
657 """Return the last object of a query or None if no match is found."""
--> 658 for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
659 return obj
660
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/django/db/models/query.py in __iter__(self)
272 - Responsible for turning the rows into model objects.
273 """
--> 274 self._fetch_all()
275 return iter(self._result_cache)
276
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/django/db/models/query.py in _fetch_all(self)
1240 def _fetch_all(self):
1241 if self._result_cache is None:
-> 1242 self._result_cache = list(self._iterable_class(self))
1243 if self._prefetch_related_lookups and not self._prefetch_done:
1244 self._prefetch_related_objects()
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/django/db/models/query.py in __iter__(self)
111 ]
112 indexes = range(len(names))
--> 113 for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
114 yield {names[i]: row[i] for i in indexes}
115
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/django/db/models/sql/compiler.py in apply_converters(self, rows, converters)
1084 value = row[pos]
1085 for converter in convs:
-> 1086 value = converter(value, expression, connection)
1087 row[pos] = value
1088 yield row
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/firebird/operations.py in convert_decimalfield_value(self, value, expression, connection, context)
304 field = expression.field
305
--> 306 val = utils.format_number(value, field.max_digits, field.decimal_places)
307 value = decimal.Decimal.from_float(float(val))
308 return value
~/.pyenv/versions/crm-nov21/lib/python3.7/site-packages/django/db/backends/utils.py in format_number(value, max_digits, decimal_places)
236 context.prec = max_digits
237 if decimal_places is not None:
--> 238 value = value.quantize(decimal.Decimal(1).scaleb(-decimal_places), context=context)
239 else:
240 context.traps[decimal.Rounded] = 1
AttributeError: 'float' object has no attribute 'quantize'
Could someone help me, please?
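The traceback boils down to django.db.backends.utils.format_number receiving a plain float where it expects a decimal.Decimal. A minimal illustration of that mechanism, outside Django (the value 12.34 is just a made-up example):

import decimal

value = 12.34  # a float, as the driver apparently returns for the DECIMAL column

# This is effectively what format_number attempts, and why it fails:
try:
    value.quantize(decimal.Decimal(1).scaleb(-2))
except AttributeError as exc:
    print(exc)  # 'float' object has no attribute 'quantize'

# A Decimal, which format_number expects to receive, quantizes fine:
print(decimal.Decimal(str(value)).quantize(decimal.Decimal(1).scaleb(-2)))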

Huggingface Bert TPU fine-tuning works on Colab but not in GCP

I'm trying to fine-tune a Huggingface transformers BERT model on TPU. It works in Colab but fails when I switch to a paid TPU on GCP. Jupyter notebook code is as follows:
[1] model = transformers.TFBertModel.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
    # works

[2] cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu='[My TPU]',
        zone='us-central1-a',
        project='[My Project]'
    )
    tf.config.experimental_connect_to_cluster(cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
    tpu_strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
    # Also works. Got a bunch of startup messages from the TPU - all good.

[3] with tpu_strategy.scope():
        model = TFBertModel.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
    # Generates the error below (long). Same line works in Colab.
Here's the error message:
NotFoundError Traceback (most recent call last)
<ipython-input-14-2cfc1a238903> in <module>
1 with tpu_strategy.scope():
----> 2 model = TFBertModel.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
309 return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
310
--> 311 ret = model(model.dummy_inputs, training=False) # build the network with dummy inputs
312
313 assert os.path.isfile(resolved_archive_file), "Error retrieving file {}".format(resolved_archive_file)
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_bert.py in call(self, inputs, **kwargs)
688
689 def call(self, inputs, **kwargs):
--> 690 outputs = self.bert(inputs, **kwargs)
691 return outputs
692
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_bert.py in call(self, inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, training)
548
549 embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
--> 550 encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training)
551
552 sequence_output = encoder_outputs[0]
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_bert.py in call(self, inputs, training)
365 all_hidden_states = all_hidden_states + (hidden_states,)
366
--> 367 layer_outputs = layer_module([hidden_states, attention_mask, head_mask[i]], training=training)
368 hidden_states = layer_outputs[0]
369
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_bert.py in call(self, inputs, training)
341 hidden_states, attention_mask, head_mask = inputs
342
--> 343 attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training)
344 attention_output = attention_outputs[0]
345 intermediate_output = self.intermediate(attention_output)
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_bert.py in call(self, inputs, training)
290 input_tensor, attention_mask, head_mask = inputs
291
--> 292 self_outputs = self.self_attention([input_tensor, attention_mask, head_mask], training=training)
293 attention_output = self.dense_output([self_outputs[0], input_tensor], training=training)
294 outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
~/.local/lib/python3.5/site-packages/transformers/modeling_tf_bert.py in call(self, inputs, training)
222
223 batch_size = shape_list(hidden_states)[0]
--> 224 mixed_query_layer = self.query(hidden_states)
225 mixed_key_layer = self.key(hidden_states)
226 mixed_value_layer = self.value(hidden_states)
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
820 with base_layer_utils.autocast_context_manager(
821 self._compute_dtype):
--> 822 outputs = self.call(cast_inputs, *args, **kwargs)
823 self._handle_activity_regularization(inputs, outputs)
824 self._set_mask_metadata(inputs, outputs, input_masks)
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/layers/core.py in call(self, inputs)
1142 outputs = gen_math_ops.mat_mul(inputs, self.kernel)
1143 if self.use_bias:
-> 1144 outputs = nn.bias_add(outputs, self.bias)
1145 if self.activation is not None:
1146 return self.activation(outputs) # pylint: disable=not-callable
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/nn_ops.py in bias_add(value, bias, data_format, name)
2756 else:
2757 return gen_nn_ops.bias_add(
-> 2758 value, bias, data_format=data_format, name=name)
2759
2760
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/gen_nn_ops.py in bias_add(value, bias, data_format, name)
675 try:
676 return bias_add_eager_fallback(
--> 677 value, bias, data_format=data_format, name=name, ctx=_ctx)
678 except _core._SymbolicException:
679 pass # Add nodes to the TensorFlow graph.
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/gen_nn_ops.py in bias_add_eager_fallback(value, bias, data_format, name, ctx)
703 data_format = "NHWC"
704 data_format = _execute.make_str(data_format, "data_format")
--> 705 _attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], ctx)
706 (value, bias) = _inputs_T
707 _inputs_flat = [value, bias]
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/execute.py in args_to_matching_eager(l, ctx, default_dtype)
265 dtype = ret[-1].dtype
266 else:
--> 267 ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
268
269 # TODO(slebedev): consider removing this as it leaks a Keras concept.
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/execute.py in <listcomp>(.0)
265 dtype = ret[-1].dtype
266 else:
--> 267 ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
268
269 # TODO(slebedev): consider removing this as it leaks a Keras concept.
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1312
1313 if ret is None:
-> 1314 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1315
1316 if ret is NotImplemented:
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/distribute/values.py in _tensor_conversion_mirrored(var, dtype, name, as_ref)
1174 # allowing instances of the class to be used as tensors.
1175 def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
-> 1176 return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
1177
1178
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/distribute/values.py in _dense_var_to_tensor(self, dtype, name, as_ref)
908 if _enclosing_tpu_context() is None:
909 return super(TPUVariableMixin, self)._dense_var_to_tensor(
--> 910 dtype=dtype, name=name, as_ref=as_ref)
911 # pylint: enable=protected-access
912 elif dtype is not None and dtype != self.dtype:
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/distribute/values.py in _dense_var_to_tensor(self, dtype, name, as_ref)
1164 assert not as_ref
1165 return ops.convert_to_tensor(
-> 1166 self.get(), dtype=dtype, name=name, as_ref=as_ref)
1167
1168 def _clone_with_new_values(self, new_values):
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/distribute/values.py in get(self, device)
835 def get(self, device=None):
836 if (_enclosing_tpu_context() is None) or (device is not None):
--> 837 return super(TPUVariableMixin, self).get(device=device)
838 else:
839 raise NotImplementedError(
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/distribute/values.py in get(self, device)
320 device = distribute_lib.get_update_device()
321 if device is None:
--> 322 return self._get_cross_replica()
323 device = device_util.canonicalize(device)
324 return self._device_map.select_for_device(self._values, device)
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/distribute/values.py in _get_cross_replica(self)
1136 replica_id = self._device_map.replica_for_device(device)
1137 if replica_id is None:
-> 1138 return array_ops.identity(self.primary)
1139 return array_ops.identity(self._values[replica_id])
1140
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/array_ops.py in identity(input, name)
265 # variables. Variables have correct handle data when graph building.
266 input = ops.convert_to_tensor(input)
--> 267 ret = gen_array_ops.identity(input, name=name)
268 # Propagate handle data for happier shape inference for resource variables.
269 if hasattr(input, "_handle_data"):
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/gen_array_ops.py in identity(input, name)
3824 pass # Add nodes to the TensorFlow graph.
3825 except _core._NotOkStatusException as e:
-> 3826 _ops.raise_from_not_ok_status(e, name)
3827 # Add nodes to the TensorFlow graph.
3828 _, _, _op, _outputs = _op_def_library._apply_op_helper(
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/ops.py in raise_from_not_ok_status(e, name)
6604 message = e.message + (" name: " + name if name is not None else "")
6605 # pylint: disable=protected-access
-> 6606 six.raise_from(core._status_to_exception(e.code, message), None)
6607 # pylint: enable=protected-access
6608
/usr/local/lib/python3.5/dist-packages/six.py in raise_from(value, from_value)
NotFoundError: '_MklMatMul' is neither a type of a primitive operation nor a name of a function registered in binary running on n-aa2fcfb7-w-0. One possible root cause is the client and server binaries are not built with the same version. Please make sure the operation or function is registered in the binary running in this process. [Op:Identity]
I posted this on the Huggingface GitHub (https://github.com/huggingface/transformers/issues/2572) and they suggest the TPU server version may not match the TPU client version, but (a) I don't know how to check for that, nor (b) what to do about it. Suggestions appreciated.
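For (a), one way to at least surface the client-side versions is sketched below; the TPU node's runtime version can then be compared against them from the Cloud Console or via gcloud (this is a diagnostic sketch, not a confirmed fix):

import tensorflow as tf
import transformers

# Versions running on the notebook VM (the "client" side of the mismatch).
print("TensorFlow (client):", tf.__version__)
print("transformers:", transformers.__version__)

# The TPU node's runtime version is reported by the Cloud TPU API, e.g. from a shell:
#   gcloud compute tpus describe [My TPU] --zone=us-central1-a
# If its TensorFlow runtime differs from tf.__version__ above, recreating the TPU
# node with a matching --version is one way to bring the two sides in line.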

error: look-behind requires fixed-width pattern (spaCy)

I created a customized spaCy model using https://github.com/explosion/spaCy/blob/master/examples/training/train_ner.py, and when I load this model it shows an error: look-behind requires fixed-width pattern. I am not sure how to solve this issue. Any help will be appreciated. Thanks in advance.
import spacy

output_dir = 'NLP_entity/model'
print("Loading from", output_dir)
nlp2 = spacy.load("NLP_entity/model")
test_text = "Remove from account"
doc2 = nlp2(test_text)
print(test_text)
if doc2.ents:
    for ent in doc2.ents:
        print("entity = {}, text = {}".format(ent.label_, ent.text))
else:
    print("Entities is None")
Error:
('Loading from', 'NLP_entity/model')
errorTraceback (most recent call last)
<ipython-input-1-94981b2ca322> in <module>()
2 output_dir = 'NLP_entity/model'
3 print("Loading from", output_dir)
----> 4 nlp2 = spacy.load("NLP_entity/model")
5 test_text = "Remove from account".decode("utf-8")
6 #print()
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/__init__.pyc in load(name, **overrides)
25 if depr_path not in (True, False, None):
26 deprecation_warning(Warnings.W001.format(path=depr_path))
---> 27 return util.load_model(name, **overrides)
28
29
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/util.pyc in load_model(name, **overrides)
131 return load_model_from_package(name, **overrides)
132 if Path(name).exists(): # path to model data directory
--> 133 return load_model_from_path(Path(name), **overrides)
134 elif hasattr(name, "exists"): # Path or Path-like to model data
135 return load_model_from_path(name, **overrides)
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/util.pyc in load_model_from_path(model_path, meta, **overrides)
171 component = nlp.create_pipe(name, config=config)
172 nlp.add_pipe(component, name=name)
--> 173 return nlp.from_disk(model_path)
174
175
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/language.pyc in from_disk(self, path, exclude, disable)
784 # Convert to list here in case exclude is (default) tuple
785 exclude = list(exclude) + ["vocab"]
--> 786 util.from_disk(path, deserializers, exclude)
787 self._path = path
788 return self
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/util.pyc in from_disk(path, readers, exclude)
609 # Split to support file names like meta.json
610 if key.split(".")[0] not in exclude:
--> 611 reader(path / key)
612 return path
613
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/language.pyc in <lambda>(p)
774 deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
775 deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
--> 776 deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
777 for name, proc in self.pipeline:
778 if name in exclude:
tokenizer.pyx in spacy.tokenizer.Tokenizer.from_disk()
tokenizer.pyx in spacy.tokenizer.Tokenizer.from_bytes()
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/re.pyc in compile(pattern, flags)
192 def compile(pattern, flags=0):
193 "Compile a regular expression pattern, returning a pattern object."
--> 194 return _compile(pattern, flags)
195
196 def purge():
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/re.pyc in _compile(*key)
249 p = sre_compile.compile(pattern, flags)
250 except error, v:
--> 251 raise error, v # invalid expression
252 if not bypass_cache:
253 if len(_cache) >= _MAXCACHE:
error: look-behind requires fixed-width pattern
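For reference, the message itself comes from Python's re module, which only accepts fixed-width look-behind patterns; whatever pattern the saved tokenizer serializes is evidently one this Python 2.7 re cannot compile. A tiny reproduction of just the error (the pattern here is made up):

import re

# Alternatives of different lengths inside a look-behind are rejected by
# Python's re module with exactly this message:
re.compile(r"(?<=foo|barbaz)x")
# error: look-behind requires fixed-width pattern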

CParserError while loading specific Seaborn datasets in a Jupyter notebook

I'm getting an error (below) when I try to load a couple of datasets, using:
import seaborn as sns
exercise = sns.load_dataset("exercise")
and
import seaborn as sns
titanic = sns.load_dataset("titanic")
It's weird, though, because iris = sns.load_dataset("iris") works great; what is causing the CParserError?
---------------------------------------------------------------------------
CParserError Traceback (most recent call last)
<ipython-input-4-6b85a4d6ff71> in <module>()
----> 1 exercise = sns.load_dataset("exercise")
2 #iris = sns.load_dataset("iris")
c:\python27\lib\site-packages\seaborn\utils.pyc in load_dataset(name, cache, data_home, **kws)
425 full_path = cache_path
426
--> 427 df = pd.read_csv(full_path, **kws)
428 if df.iloc[-1].isnull().all():
429 df = df.iloc[:-1]
c:\python27\lib\site-packages\pandas\io\parsers.pyc in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skip_footer, doublequote, delim_whitespace, as_recarray, compact_ints, use_unsigned, low_memory, buffer_lines, memory_map, float_precision)
560 skip_blank_lines=skip_blank_lines)
561
--> 562 return _read(filepath_or_buffer, kwds)
563
564 parser_f.__name__ = name
c:\python27\lib\site-packages\pandas\io\parsers.pyc in _read(filepath_or_buffer, kwds)
323 return parser
324
--> 325 return parser.read()
326
327 _parser_defaults = {
c:\python27\lib\site-packages\pandas\io\parsers.pyc in read(self, nrows)
813 raise ValueError('skip_footer not supported for iteration')
814
--> 815 ret = self._engine.read(nrows)
816
817 if self.options.get('as_recarray'):
c:\python27\lib\site-packages\pandas\io\parsers.pyc in read(self, nrows)
1312 def read(self, nrows=None):
1313 try:
-> 1314 data = self._reader.read(nrows)
1315 except StopIteration:
1316 if self._first_chunk:
pandas\parser.pyx in pandas.parser.TextReader.read (pandas\parser.c:8620)()
pandas\parser.pyx in pandas.parser.TextReader._read_low_memory (pandas\parser.c:8876)()
pandas\parser.pyx in pandas.parser.TextReader._read_rows (pandas\parser.c:9602)()
pandas\parser.pyx in pandas.parser.TextReader._tokenize_rows (pandas\parser.c:9470)()
pandas\parser.pyx in pandas.parser.raise_parser_error (pandas\parser.c:23295)()
CParserError: Error tokenizing data. C error: Expected 1 fields in line 24, saw 2
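Since load_dataset (per the signature in the traceback) caches downloaded CSVs locally, one hypothesis worth checking is a corrupted or truncated cached copy of exercise.csv or titanic.csv, which would make the pandas tokenizer choke while a cleanly cached iris.csv still loads. A quick check using the cache parameter visible in that signature:

import seaborn as sns

# Bypass the local cache (typically a "seaborn-data" folder in the home
# directory) and re-download; if this works, the cached copy was the problem.
exercise = sns.load_dataset("exercise", cache=False)
titanic = sns.load_dataset("titanic", cache=False)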