Skip to content

Commit e783f0c

Browse files
jianzswuisawesome
authored and committed
[MISC] Use string annotation types for class definitions (vllm-project#17244)
Signed-off-by: Jade Zheng <[email protected]>
1 parent 20e9e16 commit e783f0c

File tree

2 files changed

+4
-10
lines changed

2 files changed

+4
-10
lines changed

vllm/platforms/cuda.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,6 @@
2121

2222
if TYPE_CHECKING:
2323
from vllm.config import ModelConfig, VllmConfig
24-
else:
25-
ModelConfig = None
26-
VllmConfig = None
2724

2825
logger = init_logger(__name__)
2926

@@ -109,7 +106,7 @@ def log_warnings(cls):
109106
pass
110107

111108
@classmethod
112-
def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
109+
def check_and_update_config(cls, vllm_config: "VllmConfig") -> None:
113110
parallel_config = vllm_config.parallel_config
114111
scheduler_config = vllm_config.scheduler_config
115112
compilation_config = vllm_config.compilation_config
@@ -308,7 +305,7 @@ def supports_fp8(cls) -> bool:
308305
return cls.has_device_capability(89)
309306

310307
@classmethod
311-
def supports_v1(cls, model_config: ModelConfig) -> bool:
308+
def supports_v1(cls, model_config: "ModelConfig") -> bool:
312309
return True
313310

314311
@classmethod

vllm/platforms/rocm.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,6 @@
1313

1414
if TYPE_CHECKING:
1515
from vllm.config import ModelConfig, VllmConfig
16-
else:
17-
ModelConfig = None
18-
VllmConfig = None
1916

2017
logger = init_logger(__name__)
2118

@@ -243,7 +240,7 @@ def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool:
243240
return True
244241

245242
@classmethod
246-
def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
243+
def check_and_update_config(cls, vllm_config: "VllmConfig") -> None:
247244
cache_config = vllm_config.cache_config
248245
if cache_config and cache_config.block_size is None:
249246
cache_config.block_size = 16
@@ -332,7 +329,7 @@ def fp8_dtype(cls) -> torch.dtype:
332329
return torch.float8_e4m3fn
333330

334331
@classmethod
335-
def supports_v1(cls, model_config: ModelConfig) -> bool:
332+
def supports_v1(cls, model_config: "ModelConfig") -> bool:
336333
# V1 support on AMD gpus is experimental
337334
return True
338335

0 commit comments

Comments (0)