Skip to content

Commit 4ae8c63

Browse files
committed
pre-commit lint
1 parent ced5fb6 commit 4ae8c63

File tree

9 files changed

+19
-14
lines changed

9 files changed

+19
-14
lines changed

llama_stack/apis/inference/event_logger.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,12 @@
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
66

7+
from termcolor import cprint
8+
79
from llama_stack.apis.inference import (
810
ChatCompletionResponseEventType,
911
ChatCompletionResponseStreamChunk,
1012
)
11-
from termcolor import cprint
1213

1314

1415
class LogEvent:

llama_stack/cli/model/describe.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,12 @@
99

1010
from llama_models.sku_list import resolve_model
1111

12+
from termcolor import colored
13+
1214
from llama_stack.cli.subcommand import Subcommand
1315
from llama_stack.cli.table import print_table
1416
from llama_stack.distribution.utils.serialize import EnumEncoder
1517

16-
from termcolor import colored
17-
1818

1919
class ModelDescribe(Subcommand):
2020
"""Show details about a model"""

llama_stack/cli/stack/run.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ def _run_stack_run_cmd(self, args: argparse.Namespace) -> None:
4646

4747
import pkg_resources
4848
import yaml
49+
4950
from llama_stack.distribution.build import ImageType
5051
from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
5152

llama_stack/distribution/configure.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,10 @@
99
from pydantic import BaseModel
1010

1111
from llama_stack.distribution.datatypes import * # noqa: F403
12+
from prompt_toolkit import prompt
13+
from prompt_toolkit.validation import Validator
14+
from termcolor import cprint
15+
1216
from llama_stack.apis.memory.memory import MemoryBankType
1317
from llama_stack.distribution.distribution import (
1418
api_providers,
@@ -21,9 +25,6 @@
2125
from llama_stack.providers.impls.meta_reference.safety.config import (
2226
MetaReferenceShieldType,
2327
)
24-
from prompt_toolkit import prompt
25-
from prompt_toolkit.validation import Validator
26-
from termcolor import cprint
2728

2829

2930
def make_routing_entry_type(config_class: Any):

llama_stack/distribution/server/server.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -435,13 +435,13 @@ async def healthcheck():
435435
apis_to_serve = set(config.apis_to_serve)
436436
else:
437437
apis_to_serve = set(impls.keys())
438-
438+
439439
for api_str in apis_to_serve:
440440
api = Api(api_str)
441441

442442
endpoints = all_endpoints[api]
443443
impl = impls[api]
444-
444+
445445
provider_spec = specs[api]
446446
if (
447447
isinstance(provider_spec, RemoteProviderSpec)

llama_stack/distribution/utils/config_dirs.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,9 @@
88
from pathlib import Path
99

1010

11-
LLAMA_STACK_CONFIG_DIR = Path(os.getenv("LLAMA_STACK_CONFIG_DIR", os.path.expanduser("~/.llama/")))
11+
LLAMA_STACK_CONFIG_DIR = Path(
12+
os.getenv("LLAMA_STACK_CONFIG_DIR", os.path.expanduser("~/.llama/"))
13+
)
1214

1315
DISTRIBS_BASE_DIR = LLAMA_STACK_CONFIG_DIR / "distributions"
1416

llama_stack/distribution/utils/dynamic.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
from typing import Any, Dict
99

1010
from llama_stack.distribution.datatypes import * # noqa: F403
11-
from termcolor import cprint
1211

1312

1413
def instantiate_class_type(fully_qualified_name):

llama_stack/providers/adapters/inference/together/together.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
from together import Together
1616

1717
from llama_stack.apis.inference import * # noqa: F403
18+
from llama_stack.distribution.request_headers import get_request_provider_data
1819
from llama_stack.providers.utils.inference.augment_messages import (
1920
augment_messages_for_tools,
2021
)
21-
from llama_stack.distribution.request_headers import get_request_provider_data
2222

2323
from .config import TogetherImplConfig
2424

llama_stack/providers/impls/meta_reference/inference/quantization/loader.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,16 +14,17 @@
1414

1515
from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
1616
from llama_models.llama3.api.model import Transformer, TransformerBlock
17+
18+
from termcolor import cprint
19+
from torch import Tensor
20+
1721
from llama_stack.apis.inference import QuantizationType
1822

1923
from llama_stack.apis.inference.config import (
2024
CheckpointQuantizationFormat,
2125
MetaReferenceImplConfig,
2226
)
2327

24-
from termcolor import cprint
25-
from torch import Tensor
26-
2728

2829
def is_fbgemm_available() -> bool:
2930
try:

0 commit comments

Comments
 (0)
pFad - Phonifier reborn

pFad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy