import torch.nn as nn
class OptimizedMLP(nn.Module):
    """Feed-forward MLP: Linear -> LayerNorm -> ReLU -> Dropout per hidden layer.

    The output head is also followed by LayerNorm and Dropout, matching the
    original author's comment.

    Args:
        input_size: number of input features.
        hidden_sizes: iterable of hidden-layer widths, applied in order.
        output_size: number of output features.
        dropout: dropout probability used after every layer (default 0.1).
    """

    def __init__(self, input_size, hidden_sizes, output_size, dropout=0.1):
        # Bug fixes vs. original: `def init` -> `def __init__` and
        # `super().init()` -> `super().__init__()`; the original never ran
        # as a constructor and would raise AttributeError on the super call.
        super().__init__()
        layers = []
        in_features = input_size
        for hidden_size in hidden_sizes:
            layers.extend([
                nn.Linear(in_features, hidden_size),
                nn.LayerNorm(hidden_size),
                nn.ReLU(inplace=True),
                nn.Dropout(dropout),
            ])
            in_features = hidden_size
        # Add LayerNorm and Dropout to the output layer as well
        # (translated from the original Persian comment).
        # NOTE(review): normalizing/dropping the final logits is unusual —
        # confirm a raw linear head was not intended.
        layers.extend([
            nn.Linear(in_features, output_size),
            nn.LayerNorm(output_size),
            nn.Dropout(dropout),
        ])
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the layer stack to input tensor ``x`` of shape (..., input_size)."""
        return self.net(x)