# Copyright 2021 Ross Wightman. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is modified by Zilliz.
import math
from typing import List, Tuple
import torch
from torch import Tensor
import torch.nn.functional as F

# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
    """
    Calculate symmetric padding for a convolution.

    Args:
        kernel_size (`int`):
            Convolution kernel size.
        stride (`int`):
            Convolution stride parameter.
        dilation (`int`):
            Convolution dilation parameter.

    Returns:
        (`int`)
            Padding to apply on both sides of each spatial dim.
    """
    # Fixes two scrape defects: stray Sphinx "[docs]" prefix, and the
    # computed value was never returned (function fell off the end).
    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
    return padding

def get_same_padding(x: int, k: int, s: int, d: int) -> int:
    """
    Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution.

    Args:
        x (`int`):
            Input size along this spatial dim.
        k (`int`):
            Convolution kernel size.
        s (`int`):
            Convolution stride parameter.
        d (`int`):
            Convolution dilation parameter.

    Returns:
        (`int`)
            Total padding needed along this dim so the output size is ceil(x / s).
    """
    # Stray Sphinx "[docs]" prefix removed; formula itself is the standard
    # TF SAME-padding amount: (out - 1) * s + effective_kernel - x, floored at 0.
    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)

def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> bool:
    """
    Can SAME padding for the given args be done statically?

    Args:
        kernel_size (`int`):
            Convolution kernel size.
        stride (`int`):
            Convolution stride parameter.
        dilation (`int`):
            Convolution dilation parameter.

    Returns:
        (`bool`)
            True when SAME padding is input-size independent (stride 1 and an
            even total pad), so it can be baked into the conv at build time.
    """
    # Stray Sphinx "[docs]" prefix removed; logic unchanged.
    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0

def pad_same(x: torch.Tensor, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0) -> torch.Tensor:
    """
    Dynamically pad input x with 'SAME' padding for conv with specified args.

    Args:
        x (`torch.Tensor`):
            Input tensor; the last two dims are treated as (height, width).
        k (`List[int]`):
            Convolution kernel sizes (kh, kw).
        s (`List[int]`):
            Convolution stride parameters (sh, sw).
        d (`List[int]`):
            Convolution dilation parameters (dh, dw).
        value (`float`):
            Fill value for the padded region.

    Returns:
        (`torch.Tensor`)
            Padded tensor ready for a conv with TF-style 'SAME' behavior.
    """
    # The scraped original was truncated after reading ih/iw and returned x
    # unpadded; restored the standard implementation. Extra odd pixel of
    # padding goes to the bottom/right, matching TensorFlow's convention.
    ih, iw = x.size()[-2:]
    pad_h = get_same_padding(ih, k[0], s[0], d[0])
    pad_w = get_same_padding(iw, k[1], s[1], d[1])
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
    return x

"""
Args:
kernel_size(`Int`):
Convolution kernel size.
Returns:
"""
dynamic = False
# for any string padding, the padding will be calculated for you, one of three ways
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
# static case, no extra overhead
else:
dynamic = True
else:
# Default to PyTorch style 'same'-ish symmetric padding

in_height: int, in_width: int,
stride_h: int, stride_w: int,
filter_height: int, filter_width: int) -> Tensor:
"""
Args:
x(`torch.Tensor`):
Input tensor.
in_height(`int`):
Input height.
in_width(`int`):
Input width.
stride_h(`int`):
stride height.
stride_w(`int`):
stride width.
filter_height(`int`):
filter height.
filter_width(`int`):
filter width.
Returns:
(`torch.Tensor`):
Output Tensor for conv with 'SAME' padding.
"""
if in_height % stride_h == 0:
pad_along_height = max(filter_height - stride_h, 0)
else:
pad_along_height = max(filter_height - (in_height % stride_h), 0)
if in_width % stride_w == 0:
pad_along_width = max(filter_width - stride_w, 0)
else:
pad_along_width = max(filter_width - (in_width % stride_w), 0)