kgrabko committed on
Commit 64df2d6 · verified · Parent: 4f9001e

Create JiRackTernaryPyTorch_70b_RopeFix.py

Files changed (1):
  JiRackTernaryPyTorch_70b_RopeFix.py  +149 −0
JiRackTernaryPyTorch_70b_RopeFix.py ADDED
@@ -0,0 +1,149 @@
#%%writefile JiRackTernaryPyTorch_70b.py
# =============================================================================
# COPYRIGHT © 2025 Konstantin Vladimirovich Grabko. ALL RIGHTS RESERVED.
# CMS Manhattan JiRack Technology — PATENT PENDING
# =============================================================================

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Union
from transformers import PreTrainedModel, PretrainedConfig, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast
from torch.utils.checkpoint import checkpoint

class JiRackTernaryConfig(PretrainedConfig):
    model_type = "jirack_ternary_70b"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed Llama-3-70B-style geometry; constructor kwargs do not override these.
        self.vocab_size = 128256
        self.hidden_size = 8192
        self.intermediate_size = 28672
        self.num_hidden_layers = 80
        self.num_attention_heads = 64
        self.num_key_value_heads = 8
        self.head_dim = 128
        self.rms_norm_eps = 1e-5

class JiRackBitLinear(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.in_features, self.out_features = in_features, out_features
        self.register_buffer("packed", None)
        self.register_buffer("scale", None)
        self.register_buffer("orig_shape", None)

    def unpack(self):
        if self.packed is None:
            return None
        p = self.packed.to(torch.int32)
        # Four 2-bit fields per byte, most-significant pair first.
        b = torch.stack([(p >> 6) & 3, (p >> 4) & 3, (p >> 2) & 3, p & 3], dim=1).view(-1)
        shape = self.orig_shape if self.orig_shape is not None else torch.tensor([self.out_features, self.in_features])
        # Ternary unpacking: codes {0, 1, 2} map to weights {-1, 0, 1}.
        # Note: the weight count is shape[0] * shape[1]; shape.numel() would
        # return the number of dims (2), not the number of weights.
        n = int(shape[0]) * int(shape[1])
        w = b[:n].to(torch.float16) - 1.0
        return w.view(int(shape[0]), int(shape[1])) * self.scale
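
    # A minimal packing sketch, assumed inverse of unpack() above (the per-tensor
    # abs-mean scale is an assumption, in the spirit of BitNet b1.58): maps
    # ternary weights {-1, 0, 1} to 2-bit codes {0, 1, 2}, four codes per byte.
    def pack(self, w):
        scale = w.abs().mean().clamp(min=1e-8)
        codes = (torch.clamp(torch.round(w / scale), -1, 1).to(torch.int32) + 1).view(-1)
        codes = F.pad(codes, (0, (-codes.numel()) % 4))  # pad to a multiple of 4
        codes = codes.view(-1, 4)
        self.packed = ((codes[:, 0] << 6) | (codes[:, 1] << 4)
                       | (codes[:, 2] << 2) | codes[:, 3]).to(torch.uint8)
        self.scale = scale.to(torch.float16)
        self.orig_shape = torch.tensor(w.shape)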

    def forward(self, x):
        w = self.unpack()
        if w is None:
            return F.linear(x, torch.zeros(self.out_features, self.in_features, device=x.device, dtype=x.dtype))

        # Activation quantization (BitNet-style): mean-center, then scale by the abs-max.
        x_norm = x - x.mean(dim=-1, keepdim=True)
        x_scale = x_norm.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
        return F.linear((x_norm / x_scale).to(w.dtype), w) * (x_scale * 0.67)

class RMSNorm(nn.Module):
    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps, self.weight = eps, nn.Parameter(torch.ones(dim))

    def forward(self, x):
        v = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
        return (x.to(torch.float32) * torch.rsqrt(v + self.eps) * self.weight.to(torch.float32)).to(x.dtype)

def apply_rotary_emb(x, freqs):
    # x: [bsz, heads, seq, head_dim]
    # freqs: [seq, head_dim/2]
    cos = freqs.cos().view(1, 1, freqs.shape[0], freqs.shape[1])
    sin = freqs.sin().view(1, 1, freqs.shape[0], freqs.shape[1])

    # Stable RoPE: split the head dimension in half and rotate the pairs.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]

    # Math: x * cos + rotate_half(x) * sin
    rotated_x = torch.cat([-x2, x1], dim=-1)
    # Expand cos/sin to the full head_dim.
    cos_full = torch.cat([cos, cos], dim=-1)
    sin_full = torch.cat([sin, sin], dim=-1)

    return (x * cos_full) + (rotated_x * sin_full)
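
# A quick sanity-check sketch for the RoPE path (illustrative): a pure rotation
# must preserve the norm of every (x1, x2) pair, so per-token vector norms
# should be unchanged by apply_rotary_emb.
def _rope_norm_check(seq_len=16, head_dim=128):
    x = torch.randn(1, 2, seq_len, head_dim)
    inv_freq = 1.0 / (500000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq_len, dtype=torch.float32), inv_freq)
    y = apply_rotary_emb(x, freqs)
    assert torch.allclose(x.norm(dim=-1), y.norm(dim=-1), atol=1e-4)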

class JiRackAttention(nn.Module):
    def __init__(self):
        super().__init__()
        # 64 query heads and 8 key/value heads of dim 128: 64*128 = 8192, 8*128 = 1024.
        self.q_proj = JiRackBitLinear(8192, 8192)
        self.k_proj = JiRackBitLinear(8192, 1024)
        self.v_proj = JiRackBitLinear(8192, 1024)
        self.o_proj = JiRackBitLinear(8192, 8192)

    def forward(self, x, freqs):
        bsz, q_len, _ = x.shape
        q = self.q_proj(x).view(bsz, q_len, 64, 128).transpose(1, 2)
        k = self.k_proj(x).view(bsz, q_len, 8, 128).transpose(1, 2)
        v = self.v_proj(x).view(bsz, q_len, 8, 128).transpose(1, 2)

        q, k = apply_rotary_emb(q, freqs), apply_rotary_emb(k, freqs)

        # GQA (grouped-query attention) for 70B: each KV head serves 8 query heads.
        k = k.repeat_interleave(8, dim=1)
        v = v.repeat_interleave(8, dim=1)

        attn = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        return self.o_proj(attn.transpose(1, 2).reshape(bsz, q_len, -1))

class JiRackDecoderLayer(nn.Module):
    def __init__(self, layer_idx):
        super().__init__()
        self.self_attn = JiRackAttention()
        self.gate_proj = JiRackBitLinear(8192, 28672)
        self.up_proj = JiRackBitLinear(8192, 28672)
        self.down_proj = JiRackBitLinear(28672, 8192)
        self.input_layernorm = RMSNorm(8192)
        self.post_attention_layernorm = RMSNorm(8192)

    def forward(self, x, freqs):
        h = self.self_attn(self.input_layernorm(x), freqs)
        # Damped residual connections (scale 0.4).
        x = x + h * 0.4
        # SwiGLU MLP; normalize once and reuse for both branches.
        h_norm = self.post_attention_layernorm(x)
        mlp_res = F.silu(self.gate_proj(h_norm)) * self.up_proj(h_norm)
        return x + self.down_proj(mlp_res) * 0.4

class JiRackTernary70B(PreTrainedModel, GenerationMixin):
    config_class = JiRackTernaryConfig

    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = nn.Embedding(128256, 8192)
        self.layers = nn.ModuleList([JiRackDecoderLayer(i) for i in range(80)])
        self.norm = RMSNorm(8192)
        self.lm_head = nn.Linear(8192, 128256, bias=False)

        # RoPE inverse frequencies (Llama 3 base theta = 500000).
        inv_freq = 1.0 / (500000.0 ** (torch.arange(0, 128, 2).float() / 128))
        self.register_buffer("inv_freq", inv_freq)
        self.use_gc = False  # toggle for gradient checkpointing during training

    def forward(self, input_ids, **kwargs):
        x = self.embed_tokens(input_ids)
        # Compute rotary frequencies in float32 for precision.
        t = torch.arange(x.shape[1], device=x.device, dtype=torch.float32)
        freqs = torch.outer(t, self.inv_freq.to(torch.float32))

        for layer in self.layers:
            if self.training and self.use_gc:
                x = checkpoint(layer, x, freqs, use_reentrant=False)
            else:
                x = layer(x, freqs)

        # Final logit scaling of 0.8, the baseline for the 70B variant.
        return CausalLMOutputWithPast(logits=self.lm_head(self.norm(x)) * 0.8)
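
# Illustrative smoke test (a sketch, not part of the training/serving stack):
# exercises the hypothetical pack() helper and the RoPE norm check above on a
# small layer, avoiding the full 80-layer model's memory footprint.
if __name__ == "__main__":
    _rope_norm_check()                  # a rotation must preserve norms

    lin = JiRackBitLinear(64, 32)
    lin.pack(torch.randn(32, 64))       # quantize + pack a random weight matrix
    w_hat = lin.unpack()                # values in {-scale, 0, +scale}
    print(w_hat.shape, torch.unique(w_hat.float()))

    x = torch.randn(2, 8, 64)
    y = lin(x)                          # fp16 matmul; CPU support needs a recent PyTorch
    print(y.shape)                      # torch.Size([2, 8, 32])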