11 | 11 | ParamScheduler, |
12 | 12 | ) |
13 | 13 |
| 14 | +try: |
| 15 | + from torch.optim.lr_scheduler import LRScheduler |
| 16 | +except ImportError: |
| 17 | + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler |
| 18 | + |
14 | 19 | logger = logging.getLogger(__name__) |
15 | 20 |
16 | 21 |
@@ -52,7 +57,7 @@ def __init__( |
52 | 57 | ) |
53 | 58 |
54 | 59 |
55 | | -class LRMultiplier(torch.optim.lr_scheduler._LRScheduler): |
| 60 | +class LRMultiplier(LRScheduler): |
56 | 61 | """ |
57 | 62 | A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the |
58 | 63 | learning rate of each param in the optimizer. |
@@ -95,7 +100,7 @@ def __init__( |
95 | 100 | ): |
96 | 101 | """ |
97 | 102 | Args: |
98 | | - optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``. |
| 103 | + optimizer, last_iter: See ``torch.optim.lr_scheduler.LRScheduler``. |
99 | 104 | ``last_iter`` is the same as ``last_epoch``. |
100 | 105 | multiplier: a fvcore ParamScheduler that defines the multiplier on |
101 | 106 | every LR of the optimizer |
@@ -132,7 +137,7 @@ def get_lr(self) -> List[float]: |
132 | 137 | # MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. |
133 | 138 |
134 | 139 |
135 | | -class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): |
| 140 | +class WarmupMultiStepLR(LRScheduler): |
136 | 141 | def __init__( |
137 | 142 | self, |
138 | 143 | optimizer: torch.optim.Optimizer, |
@@ -171,7 +176,7 @@ def _compute_values(self) -> List[float]: |
171 | 176 | return self.get_lr() |
172 | 177 |
173 | 178 |
174 | | -class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): |
| 179 | +class WarmupCosineLR(LRScheduler): |
175 | 180 | def __init__( |
176 | 181 | self, |
177 | 182 | optimizer: torch.optim.Optimizer, |
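
For context, the sketch below shows how the aliased LRScheduler name and the patched LRMultiplier would be exercised together. It is not part of this diff: it assumes LRMultiplier is importable from detectron2.solver with the documented LRMultiplier(optimizer, multiplier, max_iter) signature, and uses fvcore's MultiStepParamScheduler, neither of which is fully shown in the hunks above.

# Usage sketch only; assumes detectron2's LRMultiplier(optimizer, multiplier, max_iter)
# signature and fvcore's MultiStepParamScheduler, neither fully shown in this diff.
import torch
from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.solver import LRMultiplier

try:
    # Newer torch releases expose the public base-class name.
    from torch.optim.lr_scheduler import LRScheduler
except ImportError:
    # Older torch only provides the underscored base class.
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# LR multiplier of 1.0 for the first 6k iterations, then 0.1, then 0.01
# (10k iterations of training in total).
multiplier = MultiStepParamScheduler(
    values=[1.0, 0.1, 0.01],
    milestones=[6000, 8000],
    num_updates=10_000,
)
scheduler = LRMultiplier(optimizer, multiplier=multiplier, max_iter=10_000)

# With the compatibility alias, this check passes on both old and new torch.
assert isinstance(scheduler, LRScheduler)

for _ in range(10_000):
    optimizer.step()
    scheduler.step()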