AI4Bharat/IndicXlit

ValueError: mutable default for field common is not allowed: use default_factory

adsk2050 opened this issue · 1 comments

I am getting this error when initializing the model. This is the traceback:
`---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[16], line 1
----> 1 e = XlitEngine(src_script_type="indic", beam_width=4, rescore=True)

File \Lib\site-packages\ai4bharat\transliteration\xlit_src.py:12, in XlitEngine(lang2use, beam_width, rescore, model_type, src_script_type)
10 elif src_script_type == "indic":
11 from .transformer import XlitEngineTransformer_Indic2En
---> 12 return XlitEngineTransformer_Indic2En(beam_width=beam_width, rescore=rescore)
14 elif model_type == "rnn":
15 assert src_script_type in {"roman", "latin", "en"}

File \Lib\site-packages\ai4bharat\transliteration\transformer\indic2en.py:60, in XlitEngineTransformer_Indic2En.init(self, beam_width, rescore)
57 else:
58 dicts_folder = None
---> 60 super().init(models_path, beam_width=beam_width, rescore=rescore)

File \Lib\site-packages\ai4bharat\transliteration\transformer\base_engine.py:45, in BaseEngineTransformer.init(self, models_path, beam_width, rescore)
42 lang_pairs_csv = ','.join(["en-"+lang for lang in self.all_supported_langs])
44 # initialize the model
---> 45 from .custom_interactive import Transliterator
46 self.transliterator = Transliterator(
47 os.path.join(models_path, CHARS_FOLDER),
48 os.path.join(models_path, MODEL_FILE),
(...)
51 beam = beam_width, batch_size = 32,
52 )
54 self.beam_width = beam_width

File \Lib\site-packages\ai4bharat\transliteration\transformer\custom_interactive.py:23
20 import numpy as np
21 import torch
---> 23 from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
24 from fairseq.dataclass.configs import FairseqConfig
25 from fairseq.dataclass.utils import convert_namespace_to_omegaconf

File \Lib\site-packages\fairseq_init_.py:20
17 all = ["pdb"]
19 # backwards compatibility to support from fairseq.X import Y
---> 20 from fairseq.distributed import utils as distributed_utils
21 from fairseq.logging import meters, metrics, progress_bar # noqa
23 sys.modules["fairseq.distributed_utils"] = distributed_utils

File \Lib\site-packages\fairseq\distributed_init_.py:7
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
6 from .distributed_timeout_wrapper import DistributedTimeoutWrapper
----> 7 from .fully_sharded_data_parallel import (
8 fsdp_enable_wrap,
9 fsdp_wrap,
10 FullyShardedDataParallel,
11 )
12 from .legacy_distributed_data_parallel import LegacyDistributedDataParallel
13 from .module_proxy_wrapper import ModuleProxyWrapper

File \Lib\site-packages\fairseq\distributed\fully_sharded_data_parallel.py:10
7 from typing import Optional
9 import torch
---> 10 from fairseq.dataclass.configs import DistributedTrainingConfig
11 from fairseq.distributed import utils as dist_utils
14 try:

File \Lib\site-packages\fairseq\dataclass_init_.py:6
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
----> 6 from .configs import FairseqDataclass
7 from .constants import ChoiceEnum
10 all = [
11 "FairseqDataclass",
12 "ChoiceEnum",
13 ]

File \Lib\site-packages\fairseq\dataclass\configs.py:1104
1095 ema_update_freq: int = field(
1096 default=1, metadata={"help": "Do EMA update every this many model updates"}
1097 )
1098 ema_fp32: bool = field(
1099 default=False,
1100 metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"},
1101 )
-> 1104 @DataClass
1105 class FairseqConfig(FairseqDataclass):
1106 common: CommonConfig = CommonConfig()
1107 common_eval: CommonEvalConfig = CommonEvalConfig()

File \Lib\dataclasses.py:1230, in dataclass(cls, init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only, slots, weakref_slot)
1227 return wrap
1229 # We're called as @DataClass without parens.
-> 1230 return wrap(cls)

File \Lib\dataclasses.py:1220, in dataclass..wrap(cls)
1219 def wrap(cls):
-> 1220 return _process_class(cls, init, repr, eq, order, unsafe_hash,
1221 frozen, match_args, kw_only, slots,
1222 weakref_slot)

File \Lib\dataclasses.py:958, in _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only, slots, weakref_slot)
955 kw_only = True
956 else:
957 # Otherwise it's a field of some type.
--> 958 cls_fields.append(_get_field(cls, name, type, kw_only))
960 for f in cls_fields:
961 fields[f.name] = f

File \Lib\dataclasses.py:815, in _get_field(cls, a_name, a_type, default_kw_only)
811 # For real fields, disallow mutable defaults. Use unhashable as a proxy
812 # indicator for mutability. Read the hash attribute from the class,
813 # not the instance.
814 if f._field_type is _FIELD and f.default.class.hash is None:
--> 815 raise ValueError(f'mutable default {type(f.default)} for field '
816 f'{f.name} is not allowed: use default_factory')
818 return f

ValueError: mutable default for field common is not allowed: use default_factory
`

This is caused by a deliberate behavior change in Python 3.11, not a bug: the `dataclasses` module now rejects *any* unhashable default value (previously it only checked for `list`, `dict`, and `set`). fairseq's `FairseqConfig` declares fields like `common: CommonConfig = CommonConfig()`, and since dataclass instances with `eq=True` are unhashable, Python 3.11 raises this `ValueError`. The proper fix on fairseq's side is to declare these fields with `field(default_factory=CommonConfig)` instead of a shared instance default.

As a workaround, I downgraded to Python 3.9 and it started working again. (Any Python version below 3.11 should also work, since the stricter check was introduced in 3.11.)