diff --git a/pgml-extension/requirements.txt b/pgml-extension/requirements.txt
index ca21fc0fb..382be4155 100644
--- a/pgml-extension/requirements.txt
+++ b/pgml-extension/requirements.txt
@@ -1,6 +1,7 @@
 accelerate==0.19.0
-datasets==2.10.1
+datasets==2.12.0
 deepspeed==0.8.1
+huggingface-hub==0.14.1
 InstructorEmbedding
 lightgbm
 pandas==1.5.3
@@ -14,6 +15,6 @@ sentence-transformers==2.2.2
 torch==1.13.1
 torchaudio==0.13.1
 torchvision==0.14.1
-tqdm==4.64.1
-transformers==4.28.1
+tqdm==4.65.0
+transformers==4.29.1
 xgboost
diff --git a/pgml-extension/src/bindings/transformers.py b/pgml-extension/src/bindings/transformers.py
index ad97171f1..8334532d1 100644
--- a/pgml-extension/src/bindings/transformers.py
+++ b/pgml-extension/src/bindings/transformers.py
@@ -43,6 +43,44 @@
 __cache_transform_pipeline_by_task = {}
 
 
+DTYPE_MAP = {
+    "uint8": torch.uint8,
+    "int8": torch.int8,
+    "int16": torch.int16,
+    "int32": torch.int32,
+    "int64": torch.int64,
+    "bfloat16": torch.bfloat16,
+    "float16": torch.float16,
+    "float32": torch.float32,
+    "float64": torch.float64,
+    "complex64": torch.complex64,
+    "complex128": torch.complex128,
+    "bool": torch.bool,
+}
+
+
+def convert_dtype(kwargs):
+    if "torch_dtype" in kwargs:
+        kwargs["torch_dtype"] = DTYPE_MAP[kwargs["torch_dtype"]]
+
+
+def convert_eos_token(tokenizer, args):
+    if "eos_token" in args:
+        args["eos_token_id"] = tokenizer.convert_tokens_to_ids(args.pop("eos_token"))
+    else:
+        args["eos_token_id"] = tokenizer.eos_token_id
+
+
+def ensure_device(kwargs):
+    device = kwargs.get("device")
+    device_map = kwargs.get("device_map")
+    if device is None and device_map is None:
+        if torch.cuda.is_available():
+            kwargs["device"] = "cuda:" + str(os.getpid() % torch.cuda.device_count())
+        else:
+            kwargs["device"] = "cpu"
+
+
 class NumpyJSONEncoder(json.JSONEncoder):
     def default(self, obj):
         if isinstance(obj, np.float32):
@@ -55,9 +93,10 @@ def transform(task, args, inputs):
     args = json.loads(args)
     inputs = json.loads(inputs)
 
+    key = ",".join([f"{key}:{val}" for (key, val) in sorted(task.items())])
     ensure_device(task)
+    convert_dtype(task)
-    key = ",".join([f"{key}:{val}" for (key, val) in sorted(task.items())])
 
     if key not in __cache_transform_pipeline_by_task:
         __cache_transform_pipeline_by_task[key] = transformers.pipeline(**task)
     pipe = __cache_transform_pipeline_by_task[key]
@@ -65,6 +104,8 @@ def transform(task, args, inputs):
     if pipe.task == "question-answering":
         inputs = [json.loads(input) for input in inputs]
 
+    convert_eos_token(pipe.tokenizer, args)
+
     return json.dumps(pipe(inputs, **args), cls=NumpyJSONEncoder)
 
 
@@ -540,12 +581,3 @@ def generate(model_id, data, config):
     return all_preds
 
 
-def ensure_device(kwargs):
-    device = kwargs.get("device")
-    device_map = kwargs.get("device_map")
-    if device is None and device_map is None:
-        if torch.cuda.is_available():
-            kwargs["device"] = "cuda:" + str(os.getpid() % torch.cuda.device_count())
-        else:
-            kwargs["device"] = "cpu"
-