fix: lru cache HF get model params (#105)

pull/106/head
Laurel Orr 11 months ago committed by GitHub
parent b745617045
commit fd7fbc9e35
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,5 +1,9 @@
0.1.9 - Unreleased
---------------------
Fixed
^^^^^
* Added trust code params to HF models
* Added LRU cache to HF model param calls to avoid extra calls
0.1.8 - 2023-05-22
---------------------

@ -1,5 +1,6 @@
"""Diffuser client."""
import logging
from functools import lru_cache
from typing import Any, Dict, Optional
import numpy as np
@ -79,6 +80,7 @@ class DiffuserClient(Client):
"""
return False
@lru_cache(maxsize=1)
def get_model_params(self) -> Dict:
"""
Get model params.

@ -1,5 +1,6 @@
"""Hugging Face client."""
import logging
from functools import lru_cache
from typing import Any, Dict, Optional
import requests
@ -73,6 +74,7 @@ class HuggingFaceClient(Client):
"""
return False
@lru_cache(maxsize=1)
def get_model_params(self) -> Dict:
"""
Get model params.

@ -1,5 +1,6 @@
"""Hugging Face client."""
import logging
from functools import lru_cache
from typing import Any, Dict, Optional, Tuple
import numpy as np
@ -65,6 +66,7 @@ class HuggingFaceEmbeddingClient(Client):
"""
return False
@lru_cache(maxsize=1)
def get_model_params(self) -> Dict:
"""
Get model params.

Loading…
Cancel
Save