cerebras.modelzoo.data.vision.segmentation.config.Hdf5BaseDataProcessorConfig#
- class cerebras.modelzoo.data.vision.segmentation.config.Hdf5BaseDataProcessorConfig(batch_size: int = <object object>, shuffle: bool = True, shuffle_seed: int = 0, num_workers: int = 0, prefetch_factor: int = 10, persistent_workers: bool = True, use_worker_cache: bool = <object object>, data_dir: Union[str, List[str]] = <object object>, num_classes: int = <object object>, normalize_data_method: str = <object object>, image_shape: List[int] = <factory>, loss: str = <object object>, augment_data: bool = True, shuffle_buffer: Optional[int] = None, drop_last: bool = True, mixed_precision: Optional[bool] = None, use_fast_dataloader: bool = False, duplicate_act_worker_data: bool = False)[source]#
- use_worker_cache: bool = <object object>#
Whether or not to cache the dataset locally on the worker nodes
- data_dir: Union[str, List[str]] = <object object>#
Path to the directory containing the HDF5 dataset, or a list of such paths
- num_classes: int = <object object>#
Number of classes in the segmentation task
- normalize_data_method: str = <object object>#
Name of the method used to normalize the input data
- image_shape: List[int]#
Shape of the input images
- loss: str = <object object>#
- augment_data: bool = True#
Whether or not to apply data augmentation to the samples
- shuffle_buffer: Optional[int] = None#
Size of the buffer used when shuffling samples
- num_workers: int = 0#
The number of PyTorch processes used in the dataloader
- drop_last: bool = True#
Whether or not to drop the last batch if it is smaller than the batch size
- prefetch_factor: int = 10#
The number of batches to prefetch in the dataloader
- persistent_workers: bool = True#
Whether or not to keep workers persistent between epochs
- mixed_precision: Optional[bool] = None#
Whether or not to cast the input data to mixed precision
- use_fast_dataloader: bool = False#
- duplicate_act_worker_data: bool = False#
- batch_size: int = <object object>#
Batch size to be used
- shuffle: bool = True#
Whether or not to shuffle the dataset
- shuffle_seed: int = 0#
Seed used for deterministic shuffling
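A minimal instantiation sketch is shown below. It assumes only the import path and field names from the signature above; every value (the data path, class count, normalization method, loss name, and image shape) is an illustrative placeholder, not a default or recommendation taken from the library.

```python
# Minimal sketch: fill in the fields that have no usable default
# (those rendered as <object object> in the signature above).
# All values below are illustrative placeholders.
from cerebras.modelzoo.data.vision.segmentation.config import (
    Hdf5BaseDataProcessorConfig,
)

config = Hdf5BaseDataProcessorConfig(
    batch_size=8,                           # required
    use_worker_cache=False,                 # required
    data_dir="/path/to/hdf5/dataset",       # required: str or list of str
    num_classes=2,                          # required
    normalize_data_method="zero_centered",  # required (assumed value)
    loss="bce",                             # required (assumed value)
    image_shape=[512, 512, 1],              # assumed dimension ordering
    num_workers=4,
    prefetch_factor=10,
    persistent_workers=True,
    shuffle=True,
    shuffle_seed=0,
)
```

In a typical training setup these fields mirror the dataloader section of a model's YAML config; the exact set of accepted values for `normalize_data_method` and `loss` is defined by the data processor that consumes this config.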