| import torch
|
| import torch.nn as nn
|
| import torch.nn.functional as F
|
| import numpy as np
|
| import math
|
| from typing import Optional, List, Tuple, Dict, Callable
|
| from enum import Enum
|
|
|
class FractalPatternType(Enum):
    """Fractal pattern families supported by FractalBrainNet."""

    MANDELBROT = "mandelbrot"
    SIERPINSKI = "sierpinski"
    JULIA = "julia"
    CANTOR = "cantor"
    DRAGON_CURVE = "dragon_curve"
|
|
|
class FractalPatternGenerator:
    """
    Builds fractal connectivity masks that act as the wiring rules
    between neurons in the FractalBrainNet.
    """

    @staticmethod
    def mandelbrot_connectivity(width: int, height: int, max_iter: int = 100) -> torch.Tensor:
        """
        Connectivity matrix from the Mandelbrot escape-time map.

        Points that stay bounded longer accumulate a higher count, i.e.
        a stronger connection. The result is normalized to [0, 1].
        """
        re_axis = torch.linspace(-2.5, 1.5, width)
        im_axis = torch.linspace(-1.5, 1.5, height)
        grid_re, grid_im = torch.meshgrid(re_axis, im_axis, indexing='ij')
        c = grid_re + 1j * grid_im
        z = torch.zeros_like(c)

        escape_counts = torch.zeros(width, height)

        for _ in range(max_iter):
            # Only iterate points that have not escaped the radius-2 disc.
            bounded = torch.abs(z) <= 2
            z[bounded] = z[bounded] ** 2 + c[bounded]
            escape_counts[bounded] += 1

        return escape_counts / max_iter

    @staticmethod
    def sierpinski_connectivity(size: int, iterations: int = 5) -> torch.Tensor:
        """
        Connectivity matrix from a Sierpinski-triangle growth rule:
        each active cell spawns two diagonal children one row down.
        """
        grid = torch.zeros(size, size)
        grid[0, size // 2] = 1.0  # seed at the top-centre cell

        for _ in range(iterations):
            children = torch.zeros_like(grid)
            for row in range(size - 1):
                for col in range(size - 1):
                    if grid[row, col] > 0:
                        # row + 1 < size always holds here (row <= size - 2),
                        # so only the column bounds need checking.
                        if col > 0:
                            children[row + 1, col - 1] = 1.0
                        if col + 1 < size:
                            children[row + 1, col + 1] = 1.0
            grid = torch.maximum(grid, children)

        return grid

    @staticmethod
    def julia_connectivity(width: int, height: int, c_real: float = -0.7,
                           c_imag: float = 0.27015, max_iter: int = 100) -> torch.Tensor:
        """
        Connectivity matrix from the Julia escape-time map for the
        constant ``c = c_real + i * c_imag``. Normalized to [0, 1].
        """
        re_axis = torch.linspace(-2, 2, width)
        im_axis = torch.linspace(-2, 2, height)
        grid_re, grid_im = torch.meshgrid(re_axis, im_axis, indexing='ij')
        z = grid_re + 1j * grid_im
        c = complex(c_real, c_imag)

        escape_counts = torch.zeros(width, height)

        for _ in range(max_iter):
            bounded = torch.abs(z) <= 2
            z[bounded] = z[bounded] ** 2 + c
            escape_counts[bounded] += 1

        return escape_counts / max_iter
|
|
|
class CerebralDynamicsModule(nn.Module):
    """
    Simulates cerebral dynamics through distributed, parallel processing,
    inspired by the brain's hierarchical organization.

    Four parallel 1x1-conv streams (named after the alpha/beta/gamma/theta
    EEG bands) are spatially gated by a fractal connectivity mask,
    re-integrated, layer-normalized over channels, and added back to the
    input as a residual.
    """

    def __init__(self, channels: int, fractal_pattern: torch.Tensor):
        """
        Args:
            channels: number of input/output channels. Must be divisible
                by 4, because the four band streams each carry
                ``channels // 4`` channels and must re-concatenate to
                exactly ``channels`` for the integration conv.
            fractal_pattern: 2-D tensor used as a spatial gating mask.

        Raises:
            ValueError: if ``channels`` is not a multiple of 4.
        """
        super().__init__()
        if channels % 4 != 0:
            # Previously this failed much later, with an opaque shape
            # mismatch inside self.integration.
            raise ValueError(f"channels must be divisible by 4, got {channels}")
        self.channels = channels
        # Buffer instead of a frozen Parameter: it follows .to()/.cuda(),
        # is serialized in state_dict, and is never trained.
        self.register_buffer('fractal_pattern', fractal_pattern)

        band_channels = channels // 4
        self.alpha_processing = nn.Conv2d(channels, band_channels, 1)
        self.beta_processing = nn.Conv2d(channels, band_channels, 1)
        self.gamma_processing = nn.Conv2d(channels, band_channels, 1)
        self.theta_processing = nn.Conv2d(channels, band_channels, 1)

        self.integration = nn.Conv2d(channels, channels, 1)
        self.normalization = nn.LayerNorm([channels])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply band-wise fractal gating; output has the same shape as x."""
        batch_size, channels, height, width = x.shape

        # Resample the fractal mask to the current spatial resolution.
        if self.fractal_pattern.shape[-2:] != (height, width):
            fractal_mask = F.interpolate(
                self.fractal_pattern.unsqueeze(0).unsqueeze(0),
                size=(height, width), mode='bilinear', align_corners=False
            ).squeeze(0).squeeze(0)  # squeeze only the dims we added, so
            # size-1 spatial dims are not accidentally collapsed
        else:
            fractal_mask = self.fractal_pattern

        mask = fractal_mask.unsqueeze(0).unsqueeze(0)  # (1, 1, H, W)

        # Each "band" sees a differently modulated view of the mask.
        alpha = self.alpha_processing(x) * mask
        beta = self.beta_processing(x) * (1 - mask)
        gamma = self.gamma_processing(x) * mask * 0.5
        theta = self.theta_processing(x) * torch.sin(fractal_mask * math.pi).unsqueeze(0).unsqueeze(0)

        combined = torch.cat([alpha, beta, gamma, theta], dim=1)
        integrated = self.integration(combined)

        # LayerNorm normalizes the last dim, so move channels last and back.
        integrated = integrated.permute(0, 2, 3, 1)
        integrated = self.normalization(integrated)
        integrated = integrated.permute(0, 3, 1, 2)

        return integrated + x
|
|
|
class FractalNeuralBlock(nn.Module):
    """
    Fractal neural block implementing the fractal expansion rule with
    integrated cerebral dynamics.

    At level 1 it is a plain conv -> norm -> dynamics -> activation cell;
    at level L > 1 it composes two level-(L-1) blocks (the deep path)
    with a single shallow conv path, fused by a learned two-way gate.
    """

    def __init__(self, level: int, in_channels: int, out_channels: int,
                 fractal_pattern: torch.Tensor, drop_path_prob: float = 0.1):
        super().__init__()
        self.level = level

        if level == 1:
            # Base case of the fractal recursion.
            self.base_conv = nn.Conv2d(in_channels, out_channels, 3, padding=1)
            self.cerebral_dynamics = CerebralDynamicsModule(out_channels, fractal_pattern)
            self.activation = nn.GELU()
            self.norm = nn.BatchNorm2d(out_channels)
        else:
            # Recursive case: two stacked sub-blocks one level down ...
            self.deep_branch = nn.Sequential(
                FractalNeuralBlock(level - 1, in_channels, out_channels,
                                   fractal_pattern, drop_path_prob),
                FractalNeuralBlock(level - 1, out_channels, out_channels,
                                   fractal_pattern, drop_path_prob),
            )
            # ... alongside a single shallow convolutional path.
            self.shallow_branch = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 3, padding=1),
                CerebralDynamicsModule(out_channels, fractal_pattern),
                nn.BatchNorm2d(out_channels),
                nn.GELU(),
            )
            # Gate producing one sigmoid weight map per branch.
            self.fractal_attention = nn.Sequential(
                nn.Conv2d(out_channels * 2, out_channels // 4, 1),
                nn.GELU(),
                nn.Conv2d(out_channels // 4, 2, 1),
                nn.Sigmoid(),
            )

        self.drop_path = (nn.Dropout2d(drop_path_prob)
                          if drop_path_prob > 0 else nn.Identity())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.level == 1:
            out = self.activation(self.cerebral_dynamics(self.norm(self.base_conv(x))))
            return self.drop_path(out)

        deep_out = self.deep_branch(x)
        shallow_out = self.shallow_branch(x)

        # Learned soft fusion of the two branches.
        gates = self.fractal_attention(torch.cat([deep_out, shallow_out], dim=1))
        deep_gate, shallow_gate = gates.chunk(2, dim=1)
        return self.drop_path(deep_gate * deep_out + shallow_gate * shallow_out)
|
|
|
class AdaptiveScaleProcessor(nn.Module):
    """
    Adaptive processor operating at several receptive-field sizes at
    once, mimicking the brain's ability to handle information at
    multiple abstraction levels simultaneously.

    Three parallel convolutions (1x1 local, 3x3 regional, 5x5 global)
    are concatenated, fused back to ``channels``, and added residually.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.channels = channels

        # Parallel branches with growing receptive fields.
        self.local_processor = nn.Conv2d(channels, channels, 1)
        self.regional_processor = nn.Conv2d(channels, channels, 3, padding=1)
        self.global_processor = nn.Conv2d(channels, channels, 5, padding=2)

        # Project the 3x-wide concatenation back to ``channels``.
        self.scale_fusion = nn.Sequential(
            nn.Conv2d(channels * 3, channels, 1),
            nn.BatchNorm2d(channels),
            nn.GELU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        branch_outputs = [
            self.local_processor(x),
            self.regional_processor(x),
            self.global_processor(x),
        ]
        fused = self.scale_fusion(torch.cat(branch_outputs, dim=1))
        return fused + x
|
|
|
class FractalBrainNet(nn.Module):
    """
    FractalBrainNet: a neural network that combines the depth of deep
    networks with the complexity and elegance of fractals, able to
    emulate cerebral dynamics through hierarchical, self-similar
    structures.

    Based on the article by Jose R. F. Junior (2024).
    """

    def __init__(self,
                 num_classes: int = 10,
                 in_channels: int = 3,
                 fractal_levels: Optional[List[int]] = None,
                 base_channels: int = 64,
                 fractal_pattern_type: FractalPatternType = FractalPatternType.MANDELBROT,
                 pattern_resolution: int = 32,
                 drop_path_prob: float = 0.15,
                 enable_continuous_learning: bool = True):
        """
        Args:
            num_classes: output size of the classification head.
            in_channels: number of channels of the input images.
            fractal_levels: recursion depth of each fractal stage;
                ``None`` selects the default ``[2, 3, 4, 5]``.
            base_channels: channel width of the stem / first stage.
            fractal_pattern_type: which fractal generator to use.
            pattern_resolution: side length of the generated pattern.
            drop_path_prob: dropout probability inside fractal blocks.
            enable_continuous_learning: adds a small meta-learning head
                that residually adapts the pooled features.
        """
        super().__init__()

        # None-default avoids the shared mutable-default-argument pitfall
        # (the old signature used fractal_levels=[2, 3, 4, 5]).
        if fractal_levels is None:
            fractal_levels = [2, 3, 4, 5]

        self.num_classes = num_classes
        self.fractal_levels = fractal_levels
        self.enable_continuous_learning = enable_continuous_learning

        # Buffer (not a plain attribute) so the pattern follows
        # .to(device)/.cuda() and is serialized with state_dict.
        self.register_buffer(
            'fractal_pattern',
            self._generate_fractal_pattern(fractal_pattern_type, pattern_resolution),
        )

        # Stem: stride-2 entry conv followed by multi-scale refinement.
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, base_channels, 7, stride=2, padding=3),
            nn.BatchNorm2d(base_channels),
            nn.GELU(),
            AdaptiveScaleProcessor(base_channels)
        )

        self.fractal_stages = nn.ModuleList()
        current_channels = base_channels

        for i, level in enumerate(fractal_levels):
            # Channel width doubles per stage, capped at 16x base.
            stage_channels = base_channels * (2 ** min(i, 4))

            fractal_block = FractalNeuralBlock(
                level, current_channels, stage_channels,
                self.fractal_pattern, drop_path_prob
            )

            scale_processor = AdaptiveScaleProcessor(stage_channels)

            # Strided-conv downsampling between stages (not after last).
            if i < len(fractal_levels) - 1:
                pooling = nn.Sequential(
                    nn.Conv2d(stage_channels, stage_channels, 3, stride=2, padding=1),
                    nn.BatchNorm2d(stage_channels),
                    nn.GELU()
                )
            else:
                pooling = nn.Identity()

            self.fractal_stages.append(nn.Sequential(
                fractal_block,
                scale_processor,
                pooling
            ))

            current_channels = stage_channels

        # Global self-attention over the pooled feature vector.
        # NOTE: current_channels must be divisible by num_heads=8.
        self.global_attention = nn.MultiheadAttention(
            current_channels, num_heads=8, batch_first=True
        )

        self.adaptive_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(current_channels, current_channels // 2),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(current_channels // 2, num_classes)
        )

        # Optional meta-learning head for continuous adaptation.
        if enable_continuous_learning:
            self.meta_learner = nn.Sequential(
                nn.Linear(current_channels, current_channels // 4),
                nn.GELU(),
                nn.Linear(current_channels // 4, current_channels)
            )

        self._initialize_weights()

    def _generate_fractal_pattern(self, pattern_type: FractalPatternType,
                                  resolution: int) -> torch.Tensor:
        """Builds the base fractal pattern for the network.

        Pattern types without a dedicated generator (CANTOR,
        DRAGON_CURVE) fall back to the Mandelbrot pattern.
        """
        if pattern_type == FractalPatternType.MANDELBROT:
            return FractalPatternGenerator.mandelbrot_connectivity(resolution, resolution)
        elif pattern_type == FractalPatternType.SIERPINSKI:
            return FractalPatternGenerator.sierpinski_connectivity(resolution)
        elif pattern_type == FractalPatternType.JULIA:
            return FractalPatternGenerator.julia_connectivity(resolution, resolution)
        else:
            # CANTOR / DRAGON_CURVE are not implemented yet.
            return FractalPatternGenerator.mandelbrot_connectivity(resolution, resolution)

    def _initialize_weights(self):
        """Weight initialization inspired by neuroplasticity."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.trunc_normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor,
                return_attention_maps: bool = False
                ) -> "torch.Tensor | Tuple[torch.Tensor, List[torch.Tensor]]":
        """
        Forward pass with hierarchical processing and cerebral dynamics.

        Returns the logits, or ``(logits, attention_maps)`` when
        ``return_attention_maps`` is True (the old annotation claimed a
        bare tensor in both cases).
        """
        x = self.stem(x)

        attention_maps = []

        for stage in self.fractal_stages:
            x = stage(x)

            if return_attention_maps:
                # Channel-mean activation map of this stage.
                attention_map = torch.mean(x, dim=1, keepdim=True)
                attention_maps.append(attention_map)

        pooled = self.adaptive_pool(x)
        features = pooled.flatten(1)

        if hasattr(self, 'global_attention'):
            # Self-attention over a length-1 "sequence" of pooled features.
            attn_input = features.unsqueeze(1)
            attended, _ = self.global_attention(attn_input, attn_input, attn_input)
            features = attended.squeeze(1)

        if self.enable_continuous_learning and hasattr(self, 'meta_learner'):
            # Small residual meta-adaptation of the pooled features.
            meta_features = self.meta_learner(features)
            features = features + 0.1 * meta_features

        output = self.classifier(features)

        if return_attention_maps:
            return output, attention_maps
        return output

    def analyze_fractal_patterns(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Analyzes the emergent patterns produced by the fractal structure,
        as described in the article's methodology.

        NOTE: switches the module to eval() mode as a side effect.
        """
        self.eval()
        with torch.no_grad():
            _, attention_maps = self.forward(x, return_attention_maps=True)

            analysis = {
                'fractal_pattern': self.fractal_pattern,
                'attention_maps': attention_maps,
                'pattern_complexity': self._compute_pattern_complexity(attention_maps),
                'hierarchical_organization': self._analyze_hierarchical_organization(attention_maps)
            }

        return analysis

    def _compute_pattern_complexity(self, attention_maps: List[torch.Tensor]) -> List[float]:
        """Shannon entropy of each softmax-normalized attention map."""
        complexities = []
        for attention_map in attention_maps:
            flat_map = attention_map.flatten()
            prob_dist = F.softmax(flat_map, dim=0)
            # +1e-8 guards against log(0).
            entropy = -torch.sum(prob_dist * torch.log(prob_dist + 1e-8))
            complexities.append(entropy.item())
        return complexities

    def _analyze_hierarchical_organization(self, attention_maps: List[torch.Tensor]) -> Dict[str, float]:
        """Cross-stage correlation of attention maps; lower correlation is
        read as stronger hierarchical differentiation."""
        if len(attention_maps) < 2:
            return {'correlation': 0.0, 'hierarchy_score': 0.0}

        correlations = []
        for i in range(len(attention_maps) - 1):
            map1 = attention_maps[i].flatten()
            # Upsample the next (coarser) map to compare like-with-like.
            map2 = F.interpolate(attention_maps[i+1], size=attention_maps[i].shape[-2:],
                                 mode='bilinear', align_corners=False).flatten()
            correlation = torch.corrcoef(torch.stack([map1, map2]))[0, 1]
            correlations.append(correlation.item())

        avg_correlation = sum(correlations) / len(correlations)
        hierarchy_score = 1.0 - avg_correlation

        return {
            'correlation': avg_correlation,
            'hierarchy_score': hierarchy_score
        }
|
|
|
|
|
def create_fractal_brain_net(model_size: str = 'medium',
                             num_classes: int = 10,
                             fractal_pattern: FractalPatternType = FractalPatternType.MANDELBROT) -> FractalBrainNet:
    """
    Builds pre-configured FractalBrainNet models inspired by the article
    by Jose R. F. Junior.

    Args:
        model_size: 'small', 'medium', 'large' or 'xlarge'; any other
            value falls back to 'medium'.
        num_classes: number of classification classes.
        fractal_pattern: fractal pattern type to use.
    """
    size_presets = {
        'small': dict(fractal_levels=[2, 3],
                      base_channels=32,
                      pattern_resolution=16),
        'medium': dict(fractal_levels=[2, 3, 4],
                       base_channels=64,
                       pattern_resolution=32),
        'large': dict(fractal_levels=[2, 3, 4, 5],
                      base_channels=96,
                      pattern_resolution=64),
        'xlarge': dict(fractal_levels=[3, 4, 5, 6],
                       base_channels=128,
                       pattern_resolution=128),
    }

    preset = size_presets.get(model_size, size_presets['medium'])

    return FractalBrainNet(
        num_classes=num_classes,
        fractal_pattern_type=fractal_pattern,
        **preset,
    )
|
|
|
|
|
if __name__ == "__main__":
    print("=== FractalBrainNet - Implementação Avançada ===")
    print("Baseada no artigo de Jose R. F. Junior (2024)")
    print()

    # One medium-sized model per supported fractal pattern.
    pattern_choices = [
        ('Mandelbrot', FractalPatternType.MANDELBROT),
        ('Sierpinski', FractalPatternType.SIERPINSKI),
        ('Julia', FractalPatternType.JULIA),
    ]
    models = {label: create_fractal_brain_net('medium', 10, pattern)
              for label, pattern in pattern_choices}

    sample_batch = torch.randn(2, 3, 64, 64)

    for name, model in models.items():
        print(f"\n=== Modelo com padrão {name} ===")

        # Plain forward pass.
        output = model(sample_batch)
        print(f"Output shape: {output.shape}")

        # Emergent-pattern analysis.
        analysis = model.analyze_fractal_patterns(sample_batch)
        print(f"Níveis de atenção capturados: {len(analysis['attention_maps'])}")
        print(f"Complexidade dos padrões: {analysis['pattern_complexity']}")
        print(f"Organização hierárquica: {analysis['hierarchical_organization']}")

        total_params = sum(p.numel() for p in model.parameters())
        print(f"Parâmetros totais: {total_params:,}")

    print("\n=== FractalBrainNet criada com sucesso! ===")
    print("Esta implementação incorpora:")
    for feature_line in (
        "- Padrões fractais (Mandelbrot, Sierpinski, Julia)",
        "- Simulação de dinâmicas cerebrais",
        "- Processamento hierárquico e multi-escala",
        "- Mecanismos de atenção inspirados no cérebro",
        "- Capacidade de aprendizado contínuo",
        "- Análise de padrões emergentes",
    ):
        print(feature_line)
|
|
|
|
|
| """
|
| # Criar modelo com padrão Mandelbrot
|
| model = create_fractal_brain_net('large', num_classes=1000,
|
| fractal_pattern=FractalPatternType.MANDELBROT)
|
|
|
| # Forward pass normal
|
| output = model(input_tensor)
|
|
|
| # Análise de padrões emergentes
|
| analysis = model.analyze_fractal_patterns(input_tensor)
|
| print("Complexidade dos padrões:", analysis['pattern_complexity'])
|
| print("Organização hierárquica:", analysis['hierarchical_organization'])
|
| """ |