# Copyright 2022 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original code from https://github.com/jhcho99/CoFormer.
#
# Modified by Zilliz.
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from towhee.models.coformer.utils import NestedTensor


class PositionEmbedding(nn.Module):
    """
    Standard sinusoidal positional encoding from "Attention Is All You Need".

    Args:
        d_model (`int`):
            Embedding dimension.
        max_len (`int`):
            Maximum supported sequence length.
        dropout (`float`):
            Dropout probability applied after the encoding is added.
    """
    def __init__(self, d_model, max_len=5000, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the (max_len, d_model) table of encodings once.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        # Even dimensions get sine, odd dimensions get cosine.
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch
        # dimension of a (seq_len, batch, d_model) input.
        pe = pe.unsqueeze(0).transpose(0, 1)
        # Register as a buffer so it moves with the module but is not trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        if isinstance(x, NestedTensor):
            x = x.tensors
        # Add the first seq_len rows of the precomputed table.
        x = x + self.pe[:x.size(0), :]
        x = self.dropout(x)
        return x
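
# A minimal usage sketch (illustration only, not part of the original module):
# the sinusoidal encoding expects inputs shaped (seq_len, batch, d_model),
# matching the (max_len, 1, d_model) buffer registered above.
#
#     emb = PositionEmbedding(d_model=512)
#     x = torch.zeros(10, 2, 512)   # (seq_len, batch, d_model)
#     out = emb(x)                  # same shape, encoding added, then dropout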


class PositionEmbeddingLearned(nn.Module):
    """
    Absolute 2D positional embedding with learned row and column tables.

    Args:
        num_pos_feats (`int`):
            Number of features per axis; the output has 2 * num_pos_feats channels.
    """
    def __init__(self, num_pos_feats=256):
        super().__init__()
        # One learned embedding per row index and per column index,
        # for feature maps up to 50 x 50.
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, x):
        if isinstance(x, NestedTensor):
            x = x.tensors
        h, w = x.shape[-2:]
        i = torch.arange(w, device=x.device)
        j = torch.arange(h, device=x.device)
        x_emb = self.col_embed(i)
        y_emb = self.row_embed(j)
        # Broadcast the per-column and per-row embeddings across the grid and
        # concatenate them channel-wise, giving (batch, 2 * num_pos_feats, h, w).
        pos = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
        return pos
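
# A minimal usage sketch (illustration only): the learned variant takes a
# feature map shaped (batch, channels, h, w) with h, w <= 50 and returns a
# positional map shaped (batch, 2 * num_pos_feats, h, w), independent of the
# input's channel count.
#
#     emb = PositionEmbeddingLearned(num_pos_feats=256)
#     feat = torch.zeros(2, 2048, 25, 38)   # e.g. a backbone feature map
#     pos = emb(feat)                       # (2, 512, 25, 38)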


def build_position_encoding(
        hidden_dim=512,
        position_embedding='sine',
        max_len=5000,
        dropout=0.
):
    """
    Build a positional encoding module: 'sine' for the fixed sinusoidal
    variant, 'learned' for the absolute learned variant. Each module is
    built with hidden_dim // 2 features.
    """
    n_steps = hidden_dim // 2
    if position_embedding == 'sine':
        position_embedding = PositionEmbedding(n_steps, max_len=max_len, dropout=dropout)
    elif position_embedding == 'learned':
        position_embedding = PositionEmbeddingLearned(n_steps)
    else:
        raise ValueError(f'Unsupported position embedding: {position_embedding}')
    return position_embedding
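

# A quick smoke-test sketch (assumed shapes, for illustration only); running
# this module directly verifies that both variants build and produce the
# expected output shapes.
if __name__ == '__main__':
    sine = build_position_encoding(hidden_dim=512, position_embedding='sine')
    seq = torch.zeros(10, 2, 256)          # (seq_len, batch, hidden_dim // 2)
    print(sine(seq).shape)                 # torch.Size([10, 2, 256])

    learned = build_position_encoding(hidden_dim=512, position_embedding='learned')
    feat = torch.zeros(2, 2048, 25, 38)    # (batch, channels, h, w)
    print(learned(feat).shape)             # torch.Size([2, 512, 25, 38])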