Commit 1c22801

Auto-format code with black
1 parent b36492d commit 1c22801

15 files changed: +66 −46 lines

thinc/backends/ops.py

Lines changed: 10 additions & 10 deletions
@@ -229,56 +229,56 @@ def affine(self, X: Floats2d, W: Floats2d, b: Floats1d) -> Floats2d:
         Y += b
         return Y

-    @overload
+    @overload
     def flatten(
         self,
         X: List[Floats2d],
         dtype: Optional[DTypes] = None,
         pad: int = 0,
         ndim_if_empty: int = 2,
-    ) -> Floats2d:
+    ) -> Floats2d:
         ...

-    @overload
+    @overload
     def flatten(
         self,
         X: List[Ints1d],
         dtype: Optional[DTypes] = None,
         pad: int = 0,
         ndim_if_empty: int = 2,
-    ) -> Ints1d:
+    ) -> Ints1d:
         ...

-    @overload
+    @overload
     def flatten(
         self,
         X: List2d,
         dtype: Optional[DTypes] = None,
         pad: int = 0,
         ndim_if_empty: int = 2,
-    ) -> Array2d:
+    ) -> Array2d:
         ...

     # further specific typed signatures can be added as necessary

-    @overload
+    @overload
     def flatten(
         self,
         X: ListXd,
         dtype: Optional[DTypes] = None,
         pad: int = 0,
         ndim_if_empty: int = 2,
-    ) -> ArrayXd:
+    ) -> ArrayXd:
         ...

-    @overload
+    @overload
     def flatten(
         self,
         X: Sequence[ArrayXd],
         dtype: Optional[DTypes] = None,
         pad: int = 0,
         ndim_if_empty: int = 2,
-    ) -> ArrayXd:
+    ) -> ArrayXd:
         ...

     def flatten(
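For reference, the overloads above only narrow the static return type; the runtime behaviour is the single flatten implementation that follows them. A minimal usage sketch (the NumpyOps backend and the array shapes are illustrative, not part of the commit):

    import numpy
    from thinc.api import NumpyOps

    ops = NumpyOps()
    # Two 2d float arrays with different numbers of rows
    Xs = [numpy.zeros((6, 4), dtype="f"), numpy.ones((3, 4), dtype="f")]
    # flatten concatenates along the first axis; the List[Floats2d]
    # overload types the result as Floats2d
    flat = ops.flatten(Xs, pad=0)
    assert flat.shape == (9, 4)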

thinc/layers/gelu.py

Lines changed: 3 additions & 2 deletions
@@ -34,8 +34,9 @@ def Gelu(
     return model


-def forward(model: Model[Floats2d, Floats2d],
-            X: Floats2d, is_train: bool) -> Tuple[Floats2d, Callable]:
+def forward(
+    model: Model[Floats2d, Floats2d], X: Floats2d, is_train: bool
+) -> Tuple[Floats2d, Callable]:
     W = cast(Floats2d, model.get_param("W"))
     b = cast(Floats1d, model.get_param("b"))
     Y_preact = model.ops.affine(X, W, b)

thinc/layers/hard_swish.py

Lines changed: 3 additions & 2 deletions
@@ -34,8 +34,9 @@ def HardSwish(
     return model


-def forward(model: Model[Floats2d, Floats2d],
-            X: Floats2d, is_train: bool) -> Tuple[Floats2d, Callable]:
+def forward(
+    model: Model[Floats2d, Floats2d], X: Floats2d, is_train: bool
+) -> Tuple[Floats2d, Callable]:
     W = cast(Floats2d, model.get_param("W"))
     b = cast(Floats1d, model.get_param("b"))
     Y_preact = model.ops.affine(X, W, b)

thinc/layers/hard_swish_mobilenet.py

Lines changed: 4 additions & 5 deletions
@@ -34,17 +34,16 @@ def HardSwishMobilenet(
     return model


-def forward(model: Model[Floats2d, Floats2d],
-            X: Floats2d, is_train: bool) -> Tuple[Floats2d, Callable]:
+def forward(
+    model: Model[Floats2d, Floats2d], X: Floats2d, is_train: bool
+) -> Tuple[Floats2d, Callable]:
     W = cast(Floats2d, model.get_param("W"))
     b = cast(Floats1d, model.get_param("b"))
     Y_preact = model.ops.affine(X, W, b)
     Y = model.ops.hard_swish_mobilenet(Y_preact)

     def backprop(dY: Floats2d) -> Floats2d:
-        dY = model.ops.backprop_hard_swish_mobilenet(dY,
-                                                     Y_preact,
-                                                     inplace=False)
+        dY = model.ops.backprop_hard_swish_mobilenet(dY, Y_preact, inplace=False)
         model.inc_grad("b", dY.sum(axis=0))
         model.inc_grad("W", model.ops.gemm(dY, X, trans1=True))
         return model.ops.gemm(dY, W)
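Outside the diff, a rough sketch of how a layer built from this forward/backprop pair is used, assuming HardSwishMobilenet is exported from thinc.api and takes nO/nI like the other activation layers (the sizes are made up):

    import numpy
    from thinc.api import HardSwishMobilenet

    model = HardSwishMobilenet(nO=8, nI=4)
    X = numpy.zeros((2, 4), dtype="f")
    model.initialize(X=X)
    # The forward pass returns the output and the backprop callback above
    Y, backprop = model(X, is_train=True)
    dX = backprop(numpy.ones_like(Y))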

thinc/layers/layernorm.py

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ def LayerNorm(nI: Optional[int] = None) -> Model[InT, InT]:
         forward,
         init=init,
         dims={"nI": nI, "nO": nI},
-        params={"G": None, "b": None}
+        params={"G": None, "b": None},
     )


thinc/layers/swish.py

Lines changed: 3 additions & 2 deletions
@@ -34,8 +34,9 @@ def Swish(
     return model


-def forward(model: Model[Floats2d, Floats2d],
-            X: Floats2d, is_train: bool) -> Tuple[Floats2d, Callable]:
+def forward(
+    model: Model[Floats2d, Floats2d], X: Floats2d, is_train: bool
+) -> Tuple[Floats2d, Callable]:
     W = cast(Floats2d, model.get_param("W"))
     b = cast(Floats1d, model.get_param("b"))
     Y_preact = model.ops.affine(X, W, b)

thinc/model.py

Lines changed: 3 additions & 1 deletion
@@ -464,7 +464,9 @@ def copy(self: SelfT) -> SelfT:
         """
         return self._copy()

-    def _copy(self: SelfT, seen: Optional[Dict[int, Union["Model", Shim]]] = None) -> SelfT:
+    def _copy(
+        self: SelfT, seen: Optional[Dict[int, Union["Model", Shim]]] = None
+    ) -> SelfT:
         if seen is None:
             seen = {}
         params = {}
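For context, _copy is the private helper behind the public copy() method shown in the hunk header. A small sketch of the public entry point, using a plain Linear layer as a stand-in:

    from thinc.api import Linear

    model = Linear(nO=2, nI=3)
    model.initialize()
    # copy() delegates to _copy(), which deep-copies params and sublayers
    model_copy = model.copy()
    assert model_copy is not model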

thinc/optimizers.py

Lines changed: 4 additions & 4 deletions
@@ -279,7 +279,7 @@ def _radam(self, ops, weights, grad, lr_scale, key, nr_upd):

         # exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
         exp_avg_sq *= beta2
-        exp_avg_sq += (1 - beta2) * (gradient_1D ** 2)
+        exp_avg_sq += (1 - beta2) * (gradient_1D**2)
         # exp_avg.mul_(beta1).add_(1 - beta1, grad)
         exp_avg *= beta1
         exp_avg += (1 - beta1) * gradient_1D
@@ -338,9 +338,9 @@ def _adam(self, ops, weights, gradient, lr_scale, key, nr_upd):
         mom2 = self.mom2[key]
         b1 = self.b1
         b2 = self.b2
-        fix1 = 1.0 - (b1 ** nr_upd)
-        fix2 = 1.0 - (b2 ** nr_upd)
-        lr = self.learn_rate * fix2 ** 0.5 / fix1
+        fix1 = 1.0 - (b1**nr_upd)
+        fix2 = 1.0 - (b2**nr_upd)
+        lr = self.learn_rate * fix2**0.5 / fix1
         eps = self.eps
         # needs to be 1D going into the adam function
         weights_1D, gradient_1D, mom1, mom2 = ops.adam(
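These two hunks come from Black's power-operator rule: since Black 22 the spaces around ** are removed when both operands are simple names, numbers, or attribute accesses. A quick way to reproduce it (assuming the black package is installed):

    import black

    src = "lr = self.learn_rate * fix2 ** 0.5 / fix1\n"
    # format_str applies the same rules the CLI uses
    formatted = black.format_str(src, mode=black.Mode())
    print(formatted, end="")
    # lr = self.learn_rate * fix2**0.5 / fix1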

thinc/tests/layers/test_combinators.py

Lines changed: 1 addition & 4 deletions
@@ -271,10 +271,7 @@ def test_concatenate():
 def test_map_list():
     nI = 4
     nO = 9
-    Xs = [
-        numpy.zeros((6, nI), dtype="f"),
-        numpy.ones((3, nI), dtype="f")
-    ]
+    Xs = [numpy.zeros((6, nI), dtype="f"), numpy.ones((3, nI), dtype="f")]
     Y_shapes = [(x.shape[0], nO) for x in Xs]
     model = map_list(Linear())
     model.initialize(X=Xs, Y=[numpy.zeros(shape, dtype="f") for shape in Y_shapes])

thinc/tests/layers/test_pytorch_wrapper.py

Lines changed: 3 additions & 1 deletion
@@ -64,7 +64,9 @@ def test_pytorch_wrapper(nN, nI, nO):
     assert isinstance(model.predict(X), numpy.ndarray)


-@pytest.mark.skipif(not has_cupy or not has_torch_gpu, reason="needs PyTorch with CUDA-capable GPU")
+@pytest.mark.skipif(
+    not has_cupy or not has_torch_gpu, reason="needs PyTorch with CUDA-capable GPU"
+)
 @pytest.mark.parametrize("nN,nI,nO", [(2, 3, 4)])
 @pytest.mark.parametrize("mixed_precision", TORCH_MIXED_PRECISION)
 def test_pytorch_wrapper_thinc_input(nN, nI, nO, mixed_precision):
