Coverage for anfis_toolbox / membership.py: 100%

616 statements  

« prev     ^ index     » next       coverage.py v7.13.3, created at 2026-02-05 18:47 -0300

1from abc import ABC, abstractmethod 

2from typing import cast 

3 

4import numpy as np 

5 

6 

7# Shared helpers for smooth S/Z transitions 

def _smoothstep(t: np.ndarray) -> np.ndarray:
    """Evaluate the cubic smoothstep polynomial S(t) = 3t² - 2t³.

    Assumes ``t`` is already normalized to [0, 1]; no clamping is applied here.
    """
    return t * t * (3.0 - 2.0 * t)

11 

12 

def _dsmoothstep_dt(t: np.ndarray) -> np.ndarray:
    """Derivative of the cubic smoothstep: dS/dt = 6t(1 - t)."""
    one_minus_t = 1.0 - t
    return 6.0 * t * one_minus_t

16 

17 

class MembershipFunction(ABC):
    """Base interface shared by every membership function in the toolbox.

    Concrete subclasses implement ``forward`` (membership evaluation) and
    ``backward`` (parameter-gradient accumulation). This base class supplies
    the shared state — a parameter dict, a matching gradient dict, and the
    cached input/output of the most recent forward pass — together with the
    convenience helpers ``__call__``, ``reset`` and string representations.

    Attributes:
        parameters (dict): Name -> value mapping of trainable parameters.
        gradients (dict): Name -> accumulated loss-gradient mapping.
        last_input (np.ndarray | None): Input of the most recent forward pass.
        last_output (np.ndarray | None): Output of the most recent forward pass.
    """

    def __init__(self) -> None:
        """Create an empty parameter/gradient store with no cached state."""
        self.parameters: dict[str, float] = {}
        self.gradients: dict[str, float] = {}
        self.last_input: np.ndarray | None = None
        self.last_output: np.ndarray | None = None

    @abstractmethod
    def forward(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the membership function.

        Args:
            x: Input array for which membership degrees are computed.

        Returns:
            np.ndarray: Membership degrees for each element of ``x``.
        """
        pass  # pragma: no cover

    @abstractmethod
    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate parameter gradients from an upstream gradient.

        Args:
            dL_dy: Gradient of the loss with respect to this function's output.

        Returns:
            None
        """
        pass  # pragma: no cover

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Alias for :meth:`forward` so instances can be used as callables.

        Args:
            x: Input array for which membership degrees are computed.

        Returns:
            np.ndarray: Membership degrees for each element of ``x``.
        """
        return self.forward(x)

    def reset(self) -> None:
        """Zero every accumulated gradient and drop the cached input/output.

        Returns:
            None
        """
        for name in self.gradients:
            self.gradients[name] = 0.0
        self.last_input = None
        self.last_output = None

    def __str__(self) -> str:
        """Concise form such as ``GaussianMF(mean=0.000, sigma=1.000)``."""
        inner = ", ".join(f"{name}={value:.3f}" for name, value in self.parameters.items())
        return f"{self.__class__.__name__}({inner})"

    def __repr__(self) -> str:
        """Defer to :meth:`__str__` for a detailed representation."""
        return self.__str__()

93 

94 

class GaussianMF(MembershipFunction):
    """Gaussian Membership Function.

    Implements a Gaussian (bell-shaped) membership function using the formula:
        μ(x) = exp(-((x - mean)² / (2 * sigma²)))

    This function is commonly used in fuzzy logic systems due to its smooth
    and differentiable properties.

    Attributes:
        parameters (dict): 'mean' (center) and 'sigma' (width).
        gradients (dict): Accumulated loss gradients for each parameter.
    """

    def __init__(self, mean: float = 0.0, sigma: float = 1.0):
        """Initialize with mean and standard deviation.

        Args:
            mean: Mean of the Gaussian (center). Defaults to 0.0.
            sigma: Standard deviation (width). Defaults to 1.0.
        """
        super().__init__()
        self.parameters = {"mean": mean, "sigma": sigma}
        # Initialize gradients to zero for all parameters
        self.gradients = dict.fromkeys(self.parameters.keys(), 0.0)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute Gaussian membership values.

        Args:
            x: Input array for which the membership values are computed.

        Returns:
            np.ndarray: Array of Gaussian membership values.
        """
        mean = self.parameters["mean"]
        sigma = self.parameters["sigma"]
        self.last_input = x
        self.last_output = np.exp(-((x - mean) ** 2) / (2 * sigma**2))
        return self.last_output

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate gradients w.r.t. 'mean' and 'sigma'.

        With z = (x - mean) / sigma and y = μ(x):
            ∂μ/∂mean  = y · z / sigma
            ∂μ/∂sigma = y · z² / sigma

        Args:
            dL_dy: Gradient of the loss with respect to the output of this layer.

        Raises:
            RuntimeError: If called before forward.

        Returns:
            None
        """
        mean = self.parameters["mean"]
        sigma = self.parameters["sigma"]

        if self.last_input is None or self.last_output is None:
            raise RuntimeError("forward must be called before backward.")

        x = self.last_input
        y = self.last_output

        z = (x - mean) / sigma

        # BUGFIX: ∂μ/∂mean = +y·z/sigma. The previous -y·z/sigma is the
        # derivative w.r.t. x, not w.r.t. the mean; Gaussian2MF already uses
        # the positive convention for its centers (dmu_dc1 = y·z1/s1).
        dy_dmean = y * z / sigma
        dy_dsigma = y * (z**2) / sigma

        # Gradient with respect to mean, summed over the batch
        dL_dmean = np.sum(dL_dy * dy_dmean)

        # Gradient with respect to sigma, summed over the batch
        dL_dsigma = np.sum(dL_dy * dy_dsigma)

        # Accumulate (supports multiple backward calls per epoch)
        self.gradients["mean"] += dL_dmean
        self.gradients["sigma"] += dL_dsigma

165 

166 

class Gaussian2MF(MembershipFunction):
    """Two-sided Gaussian membership function (Gaussian combination).

    Combines a left Gaussian shoulder, an optional flat top, and a right
    Gaussian shoulder (with c1 <= c2):

        μ(x) = exp(-((x - c1)²) / (2·sigma1²))   for x < c1
        μ(x) = 1                                 for c1 <= x <= c2
        μ(x) = exp(-((x - c2)²) / (2·sigma2²))   for x > c2

    When c1 == c2 the plateau collapses and the curve is an asymmetric
    Gaussian: sigma1 shapes the left side, sigma2 the right side.

    Attributes:
        parameters (dict): 'sigma1', 'c1', 'sigma2', 'c2'.
        gradients (dict): Accumulated loss gradients per parameter.
    """

    def __init__(self, sigma1: float = 1.0, c1: float = 0.0, sigma2: float = 1.0, c2: float = 0.0):
        """Validate and store the two Gaussian components.

        Args:
            sigma1: Width of the left shoulder; must be positive. Defaults to 1.0.
            c1: Center of the left shoulder. Defaults to 0.0.
            sigma2: Width of the right shoulder; must be positive. Defaults to 1.0.
            c2: Center of the right shoulder; requires c1 <= c2. Defaults to 0.0.

        Raises:
            ValueError: If sigma1 or sigma2 is not positive, or if c1 > c2.
        """
        super().__init__()
        if sigma1 <= 0:
            raise ValueError(f"Parameter 'sigma1' must be positive, got sigma1={sigma1}")
        if sigma2 <= 0:
            raise ValueError(f"Parameter 'sigma2' must be positive, got sigma2={sigma2}")
        if c1 > c2:
            raise ValueError(f"Parameters must satisfy c1 <= c2, got c1={c1}, c2={c2}")

        self.parameters = {"sigma1": float(sigma1), "c1": float(c1), "sigma2": float(sigma2), "c2": float(c2)}
        self.gradients = {"sigma1": 0.0, "c1": 0.0, "sigma2": 0.0, "c2": 0.0}

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute two-sided Gaussian membership values.

        The centers split the axis into a left tail (< c1), a flat unit
        region ([c1, c2]) and a right tail (> c2).

        Args:
            x: Input array of values.

        Returns:
            np.ndarray: Membership degrees for each input value.
        """
        x = np.asarray(x, dtype=float)
        self.last_input = x

        params = self.parameters
        s1, c1 = params["sigma1"], params["c1"]
        s2, c2 = params["sigma2"], params["c2"]

        # Start from the plateau value and overwrite the two tails; every
        # point belongs to exactly one of the three regions.
        y = np.ones_like(x)

        below = x < c1
        if np.any(below):
            xl = x[below]
            y[below] = np.exp(-((xl - c1) ** 2) / (2.0 * s1 * s1))

        above = x > c2
        if np.any(above):
            xr = x[above]
            y[above] = np.exp(-((xr - c2) ** 2) / (2.0 * s2 * s2))

        self.last_output = y
        return y

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate parameter gradients; the flat top contributes nothing.

        Each tail behaves like an ordinary Gaussian on its own region:
            ∂μ/∂center = μ·z/σ and ∂μ/∂σ = μ·z²/σ with z = (x - center)/σ.

        Args:
            dL_dy: Upstream gradient of the loss w.r.t. the output.

        Returns:
            None
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        dL_dy = np.asarray(dL_dy)
        params = self.parameters

        # (center key, sigma key, region mask) for the two Gaussian tails.
        tails = (
            ("c1", "sigma1", x < params["c1"]),
            ("c2", "sigma2", x > params["c2"]),
        )

        for center_key, sigma_key, mask in tails:
            if not np.any(mask):
                continue
            center = params[center_key]
            sigma = params[sigma_key]

            xm = x[mask]
            ym = np.exp(-((xm - center) ** 2) / (2.0 * sigma * sigma))
            zm = (xm - center) / sigma

            dmu_dcenter = ym * zm / sigma
            dmu_dsigma = ym * (zm**2) / sigma

            self.gradients[center_key] += float(np.sum(dL_dy[mask] * dmu_dcenter))
            self.gradients[sigma_key] += float(np.sum(dL_dy[mask] * dmu_dsigma))

317 

318 

class TriangularMF(MembershipFunction):
    """Triangular Membership Function.

    Piecewise linear "tent" defined by base points a, c and peak b:
        μ(x) = { 0,            x ≤ a or x ≥ c
               { (x-a)/(b-a),  a < x < b
               { 1,            x == b
               { (c-x)/(c-b),  b < x < c

    Note:
        Parameters must satisfy a ≤ b ≤ c, with a < c (non-zero width).
    """

    def __init__(self, a: float, b: float, c: float):
        """Initialize the triangular membership function.

        Args:
            a: Left base point (must satisfy a ≤ b).
            b: Peak point (must satisfy a ≤ b ≤ c).
            c: Right base point (must satisfy b ≤ c).

        Raises:
            ValueError: If parameters do not satisfy a ≤ b ≤ c or if a == c (zero width).
        """
        super().__init__()

        if not (a <= b <= c):
            raise ValueError(f"Triangular MF parameters must satisfy a ≤ b ≤ c, got a={a}, b={b}, c={c}")
        if a == c:
            raise ValueError("Parameters 'a' and 'c' cannot be equal (zero width triangle)")

        self.parameters = {"a": float(a), "b": float(b), "c": float(c)}
        self.gradients = dict.fromkeys(self.parameters.keys(), 0.0)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute triangular membership values μ(x).

        Args:
            x: Input array.

        Returns:
            np.ndarray: Membership values in [0, 1] with the same shape as x.
        """
        a, b, c = self.parameters["a"], self.parameters["b"], self.parameters["c"]
        self.last_input = x

        output = np.zeros_like(x, dtype=float)

        # Rising ramp on (a, b); guarded so a == b (vertical edge) divides nothing
        if b > a:
            left_mask = (x > a) & (x < b)
            output[left_mask] = (x[left_mask] - a) / (b - a)

        # Peak: exact membership 1 at x == b
        peak_mask = x == b
        output[peak_mask] = 1.0

        # Falling ramp on (b, c); guarded so b == c divides nothing
        if c > b:
            right_mask = (x > b) & (x < c)
            output[right_mask] = (c - x[right_mask]) / (c - b)

        # Clip for numerical stability
        output = cast(np.ndarray, np.clip(output, 0.0, 1.0))

        self.last_output = output
        return output

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate gradients for a, b, c given the upstream gradient.

        Rising ramp μ = (x-a)/(b-a) on (a, b):
            ∂μ/∂a = (x-b)/(b-a)²,  ∂μ/∂b = -(x-a)/(b-a)²
        Falling ramp μ = (c-x)/(c-b) on (b, c):
            ∂μ/∂b = (c-x)/(c-b)²,  ∂μ/∂c = (x-b)/(c-b)²

        Args:
            dL_dy: Gradient of the loss w.r.t. μ(x); same shape as the output.

        Returns:
            None
        """
        if self.last_input is None or self.last_output is None:
            return

        a, b, c = self.parameters["a"], self.parameters["b"], self.parameters["c"]
        x = self.last_input

        dL_da = 0.0
        dL_db = 0.0
        dL_dc = 0.0

        # Left slope: a < x < b
        if b > a:
            left_mask = (x > a) & (x < b)
            if np.any(left_mask):
                x_left = x[left_mask]
                dL_dy_left = dL_dy[left_mask]

                # ∂μ/∂a = (x - b) / (b - a)^2
                dmu_da_left = (x_left - b) / ((b - a) ** 2)
                dL_da += np.sum(dL_dy_left * dmu_da_left)

                # ∂μ/∂b = -(x - a) / (b - a)^2
                dmu_db_left = -(x_left - a) / ((b - a) ** 2)
                dL_db += np.sum(dL_dy_left * dmu_db_left)

        # Right slope: b < x < c
        if c > b:
            right_mask = (x > b) & (x < c)
            if np.any(right_mask):
                x_right = x[right_mask]
                dL_dy_right = dL_dy[right_mask]

                # BUGFIX: ∂μ/∂b = (c - x) / (c - b)^2, which is positive here
                # (raising b raises the falling ramp). The previous
                # (x - c)/(c - b)^2 had the sign inverted.
                dmu_db_right = (c - x_right) / ((c - b) ** 2)
                dL_db += np.sum(dL_dy_right * dmu_db_right)

                # ∂μ/∂c = (x - b) / (c - b)^2
                dmu_dc_right = (x_right - b) / ((c - b) ** 2)
                dL_dc += np.sum(dL_dy_right * dmu_dc_right)

        # Accumulate (supports batch processing)
        self.gradients["a"] += dL_da
        self.gradients["b"] += dL_db
        self.gradients["c"] += dL_dc

453 

454 

class TrapezoidalMF(MembershipFunction):
    """Trapezoidal Membership Function.

    Piecewise linear curve with a plateau of full membership:
        μ(x) = { 0,            x ≤ a or x ≥ d
               { (x-a)/(b-a),  a < x < b
               { 1,            b ≤ x ≤ c
               { (d-x)/(d-c),  c < x < d

    Useful when a range of inputs should carry full membership, which adds
    robustness to noise and uncertainty.

    Parameters:
        a (float): Left base point (lower support bound).
        b (float): Left peak point (start of plateau).
        c (float): Right peak point (end of plateau).
        d (float): Right base point (upper support bound).

    Note:
        Parameters must satisfy a ≤ b ≤ c ≤ d, with a < d (non-zero width).
    """

    def __init__(self, a: float, b: float, c: float, d: float):
        """Initialize the trapezoidal membership function.

        Args:
            a: Left base point (μ(a) = 0).
            b: Left peak point (μ(b) = 1, start of plateau).
            c: Right peak point (μ(c) = 1, end of plateau).
            d: Right base point (μ(d) = 0).

        Raises:
            ValueError: If parameters don't satisfy a ≤ b ≤ c ≤ d, or if a == d.
        """
        super().__init__()

        # Validate parameters
        if not (a <= b <= c <= d):
            raise ValueError(f"Trapezoidal MF parameters must satisfy a ≤ b ≤ c ≤ d, got a={a}, b={b}, c={c}, d={d}")

        if a == d:
            raise ValueError("Parameters 'a' and 'd' cannot be equal (zero width trapezoid)")

        self.parameters = {"a": float(a), "b": float(b), "c": float(c), "d": float(d)}
        # Initialize gradients to zero for all parameters
        self.gradients = dict.fromkeys(self.parameters.keys(), 0.0)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute trapezoidal membership values.

        Args:
            x: Input array.

        Returns:
            np.ndarray: Trapezoidal membership values in [0, 1].
        """
        a = self.parameters["a"]
        b = self.parameters["b"]
        c = self.parameters["c"]
        d = self.parameters["d"]

        self.last_input = x

        # BUGFIX: force a float buffer. np.zeros_like(x) inherits x's dtype,
        # so integer inputs truncated the fractional slope values to 0.
        # (TriangularMF already used dtype=float.)
        output = np.zeros_like(x, dtype=float)

        # Left slope: (x - a) / (b - a) for a < x < b
        if b > a:  # guard against division by zero when a == b
            left_mask = (x > a) & (x < b)
            output[left_mask] = (x[left_mask] - a) / (b - a)

        # Plateau: μ(x) = 1 for b ≤ x ≤ c
        plateau_mask = (x >= b) & (x <= c)
        output[plateau_mask] = 1.0

        # Right slope: (d - x) / (d - c) for c < x < d
        if d > c:  # guard against division by zero when c == d
            right_mask = (x > c) & (x < d)
            output[right_mask] = (d - x[right_mask]) / (d - c)

        # Values outside [a, d] remain zero
        self.last_output = output
        return output

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate analytical gradients for a, b, c, d.

        Rising ramp μ = (x-a)/(b-a) on (a, b):
            ∂μ/∂a = (x-b)/(b-a)²,  ∂μ/∂b = -(x-a)/(b-a)²
        Falling ramp μ = (d-x)/(d-c) on (c, d):
            ∂μ/∂c = (d-x)/(d-c)²,  ∂μ/∂d = (x-c)/(d-c)²
        The plateau is constant and contributes no gradients.

        Args:
            dL_dy: Gradient of the loss w.r.t. the output of this layer.

        Returns:
            None
        """
        if self.last_input is None or self.last_output is None:
            return

        a = self.parameters["a"]
        b = self.parameters["b"]
        c = self.parameters["c"]
        d = self.parameters["d"]

        x = self.last_input

        dL_da = 0.0
        dL_db = 0.0
        dL_dc = 0.0
        dL_dd = 0.0

        # Left slope region: a < x < b, μ(x) = (x-a)/(b-a)
        if b > a:
            left_mask = (x > a) & (x < b)
            if np.any(left_mask):
                x_left = x[left_mask]
                dL_dy_left = dL_dy[left_mask]

                # BUGFIX: ∂μ/∂a = (x - b)/(b - a)², not -1/(b - a): 'a' also
                # appears in the denominator (matches TriangularMF).
                dmu_da_left = (x_left - b) / ((b - a) ** 2)
                dL_da += np.sum(dL_dy_left * dmu_da_left)

                # ∂μ/∂b = -(x - a)/(b - a)²
                dmu_db_left = -(x_left - a) / ((b - a) ** 2)
                dL_db += np.sum(dL_dy_left * dmu_db_left)

        # Plateau region: b ≤ x ≤ c is constant 1 — no gradients

        # Right slope region: c < x < d, μ(x) = (d-x)/(d-c)
        if d > c:
            right_mask = (x > c) & (x < d)
            if np.any(right_mask):
                x_right = x[right_mask]
                dL_dy_right = dL_dy[right_mask]

                # BUGFIX: ∂μ/∂c = (d - x)/(d - c)², which is positive in this
                # region; the previous (x - d)/(d - c)² had the sign inverted.
                dmu_dc_right = (d - x_right) / ((d - c) ** 2)
                dL_dc += np.sum(dL_dy_right * dmu_dc_right)

                # ∂μ/∂d = (x - c)/(d - c)²
                dmu_dd_right = (x_right - c) / ((d - c) ** 2)
                dL_dd += np.sum(dL_dy_right * dmu_dd_right)

        # Accumulate (supports batch processing)
        self.gradients["a"] += dL_da
        self.gradients["b"] += dL_db
        self.gradients["c"] += dL_dc
        self.gradients["d"] += dL_dd

609 

610 

class BellMF(MembershipFunction):
    """Bell-shaped (Generalized Bell) Membership Function.

    Implements a bell-shaped membership function using the formula:
        μ(x) = 1 / (1 + |((x - c) / a)|^(2b))

    This function is a generalization of the Gaussian function and provides
    more flexibility in controlling the shape through the 'b' parameter.
    It's particularly useful when you need asymmetric membership functions
    or want to fine-tune the slope characteristics.

    Parameters:
        a (float): Width parameter (positive). Controls the width of the curve.
        b (float): Slope parameter (positive). Controls the steepness of the curve.
        c (float): Center parameter. Controls the center position of the curve.

    Note:
        Parameters 'a' and 'b' must be positive for a valid bell function.
    """

    def __init__(self, a: float = 1.0, b: float = 2.0, c: float = 0.0):
        """Initialize with width, slope, and center parameters.

        Args:
            a: Width parameter (must be positive). Defaults to 1.0.
            b: Slope parameter (must be positive). Defaults to 2.0.
            c: Center parameter. Defaults to 0.0.

        Raises:
            ValueError: If 'a' or 'b' are not positive.
        """
        super().__init__()

        # Validate parameters
        if a <= 0:
            raise ValueError(f"Parameter 'a' must be positive, got a={a}")

        if b <= 0:
            raise ValueError(f"Parameter 'b' must be positive, got b={b}")

        self.parameters = {"a": float(a), "b": float(b), "c": float(c)}
        # Initialize gradients to zero for all parameters
        self.gradients = dict.fromkeys(self.parameters.keys(), 0.0)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute bell membership values.

        Args:
            x: Input array for which the membership values are computed.

        Returns:
            np.ndarray: Array of bell membership values.
        """
        a = self.parameters["a"]
        b = self.parameters["b"]
        c = self.parameters["c"]

        self.last_input = x

        # Compute the bell function: μ(x) = 1 / (1 + |((x - c) / a)|^(2b))
        # To avoid numerical issues, we use the absolute value and handle edge cases

        # Normalize the input relative to the center and width
        normalized = (x - c) / a

        # Compute |normalized|^(2b); np.abs keeps the base non-negative so
        # np.power is well-defined for fractional exponents
        abs_normalized = np.abs(normalized)

        # Suppress warnings from extreme exponentiation; overflowed (inf/nan)
        # entries are clamped to 1e10 so the resulting membership is ~1e-10
        # (near zero) instead of nan
        with np.errstate(divide="ignore", invalid="ignore"):
            power_term = np.power(abs_normalized, 2 * b)
            # Replace any inf or nan with a very large number to make output close to 0
            power_term = np.where(np.isfinite(power_term), power_term, 1e10)

        # Final bell value; at x == c the power term is 0 and μ = 1
        output = 1.0 / (1.0 + power_term)

        self.last_output = output
        return output

    def backward(self, dL_dy: np.ndarray) -> None:
        """Compute parameter gradients given upstream gradient.

        Writes μ = 1/(1 + z) with z = |((x - c)/a)|^(2b) and applies the chain
        rule ∂L/∂param = ∂L/∂μ · ∂μ/∂z · ∂z/∂param, where ∂μ/∂z = -μ².

        Args:
            dL_dy: Gradient of the loss w.r.t. the output of this layer.

        Raises:
            RuntimeError: If called before forward.

        Returns:
            None
        """
        a = self.parameters["a"]
        b = self.parameters["b"]
        c = self.parameters["c"]

        if self.last_input is None or self.last_output is None:
            raise RuntimeError("forward must be called before backward.")

        x = self.last_input
        y = self.last_output  # This is μ(x)

        # Intermediate calculations
        normalized = (x - c) / a
        abs_normalized = np.abs(normalized)

        # Avoid division by zero and numerical issues:
        # the gradient terms contain |normalized|^(2b-1), which blows up (or is
        # undefined) as |normalized| → 0, so only points away from the peak
        # contribute
        epsilon = 1e-12
        valid_mask = abs_normalized > epsilon

        if not np.any(valid_mask):
            # If all values are at the peak (x ≈ c), gradients are zero
            return

        # Initialize gradients
        dL_da = 0.0
        dL_db = 0.0
        dL_dc = 0.0

        # Only compute where we have valid values
        x_valid = x[valid_mask]
        y_valid = y[valid_mask]
        dL_dy_valid = dL_dy[valid_mask]
        normalized_valid = (x_valid - c) / a
        abs_normalized_valid = np.abs(normalized_valid)

        # Power term: |normalized|^(2b)
        power_term_valid = np.power(abs_normalized_valid, 2 * b)

        # For the bell function μ = 1/(1 + z) where z = |normalized|^(2b)
        # ∂μ/∂z = -1/(1 + z)² = -μ²
        dmu_dz = -y_valid * y_valid

        # Chain rule: ∂L/∂param = ∂L/∂μ × ∂μ/∂z × ∂z/∂param

        # ∂z/∂a = ∂(|normalized|^(2b))/∂a
        #       = 2b × |normalized|^(2b-1) × ∂|normalized|/∂a
        #       = 2b × |normalized|^(2b-1) × sign(normalized) × ∂normalized/∂a
        #       = 2b × |normalized|^(2b-1) × sign(normalized) × (-(x-c)/a²)
        #       = -2b × |normalized|^(2b-1) × sign(normalized) × (x-c)/a²

        sign_normalized = np.sign(normalized_valid)
        dz_da = -2 * b * np.power(abs_normalized_valid, 2 * b - 1) * sign_normalized * (x_valid - c) / (a * a)
        dL_da += np.sum(dL_dy_valid * dmu_dz * dz_da)

        # ∂z/∂b = ∂(|normalized|^(2b))/∂b
        #       = |normalized|^(2b) × ln(|normalized|) × 2
        # But ln(|normalized|) can be problematic near zero, so we use a safe
        # version: non-finite logs are replaced by 0, dropping those points'
        # contribution to the 'b' gradient
        with np.errstate(divide="ignore", invalid="ignore"):
            ln_abs_normalized = np.log(abs_normalized_valid)
            ln_abs_normalized = np.where(np.isfinite(ln_abs_normalized), ln_abs_normalized, 0.0)

        dz_db = 2 * power_term_valid * ln_abs_normalized
        dL_db += np.sum(dL_dy_valid * dmu_dz * dz_db)

        # ∂z/∂c = ∂(|normalized|^(2b))/∂c
        #       = 2b × |normalized|^(2b-1) × sign(normalized) × ∂normalized/∂c
        #       = 2b × |normalized|^(2b-1) × sign(normalized) × (-1/a)
        #       = -2b × |normalized|^(2b-1) × sign(normalized) / a

        dz_dc = -2 * b * np.power(abs_normalized_valid, 2 * b - 1) * sign_normalized / a
        dL_dc += np.sum(dL_dy_valid * dmu_dz * dz_dc)

        # Update gradients (accumulate for batch processing)
        self.gradients["a"] += dL_da
        self.gradients["b"] += dL_db
        self.gradients["c"] += dL_dc

782 

783 

class SigmoidalMF(MembershipFunction):
    """Sigmoidal Membership Function.

    S-shaped logistic curve:
        μ(x) = 1 / (1 + exp(-a(x - c)))

    The curve transitions smoothly from 0 to 1 (for a > 0) with its
    inflection point at x = c, where μ(c) = 0.5.

    Parameters:
        a (float): Slope. Positive → rising sigmoid, negative → falling;
            larger |a| gives a steeper transition. Must be non-zero.
        c (float): Center (inflection point).

    Note:
        a == 0 would make the function constant and is rejected.
    """

    def __init__(self, a: float = 1.0, c: float = 0.0):
        """Initialize the sigmoidal membership function.

        Args:
            a: Slope parameter (cannot be zero). Defaults to 1.0.
            c: Center parameter (inflection point). Defaults to 0.0.

        Raises:
            ValueError: If 'a' is zero.
        """
        super().__init__()

        if a == 0:
            raise ValueError(f"Parameter 'a' cannot be zero, got a={a}")

        self.parameters = {"a": float(a), "c": float(c)}
        self.gradients = dict.fromkeys(self.parameters.keys(), 0.0)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute sigmoidal membership values.

        Args:
            x: Input array for which the membership values are computed.

        Returns:
            np.ndarray: Array of sigmoidal membership values.
        """
        slope = self.parameters["a"]
        center = self.parameters["c"]

        self.last_input = x

        # z = a(x - c); μ = σ(z). Evaluate the two algebraically equal forms
        # on disjoint regions so np.exp never sees a large positive argument:
        #   z >= 0: σ(z) = 1 / (1 + exp(-z))
        #   z <  0: σ(z) = exp(z) / (1 + exp(z))
        z = slope * (x - center)
        result = np.zeros_like(x, dtype=float)

        nonneg = z >= 0
        if np.any(nonneg):
            result[nonneg] = 1.0 / (1.0 + np.exp(-z[nonneg]))

        neg = z < 0
        if np.any(neg):
            expz = np.exp(z[neg])
            result[neg] = expz / (1.0 + expz)

        self.last_output = result
        return result

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate parameter gradients given the upstream gradient.

        For μ(x) = σ(a(x - c)):
            ∂μ/∂a = μ(1 - μ)(x - c)
            ∂μ/∂c = -a·μ(1 - μ)

        Args:
            dL_dy: Gradient of the loss w.r.t. the output of this layer.

        Raises:
            RuntimeError: If called before forward.

        Returns:
            None
        """
        slope = self.parameters["a"]
        center = self.parameters["c"]

        if self.last_input is None or self.last_output is None:
            raise RuntimeError("forward must be called before backward.")

        x = self.last_input
        mu = self.last_output

        # σ'(z) = σ(z)(1 - σ(z)) — fundamental sigmoid identity
        dmu_dz = mu * (1.0 - mu)

        # Chain rule through z = a(x - c): ∂z/∂a = x - c, ∂z/∂c = -a
        self.gradients["a"] += np.sum(dL_dy * dmu_dz * (x - center))
        self.gradients["c"] += np.sum(dL_dy * dmu_dz * -slope)

910 

911 

class DiffSigmoidalMF(MembershipFunction):
    """Difference of two sigmoidal functions.

    Implements μ(x) = s1(x) - s2(x), where each s is a logistic curve with
    its own slope (a) and center (c). With a1, a2 > 0 and c1 < c2 this
    produces a smooth, hump-shaped membership function.
    """

    def __init__(self, a1: float, c1: float, a2: float, c2: float):
        """Store the parameters of the two sigmoids.

        Args:
            a1: Slope of the first sigmoid.
            c1: Center of the first sigmoid.
            a2: Slope of the second sigmoid.
            c2: Center of the second sigmoid.

        Attributes:
            parameters (dict): 'a1', 'c1', 'a2', 'c2'.
            gradients (dict): Per-parameter accumulated gradients, zeroed.
            last_input: Cached input of the latest forward pass (None initially).
            last_output: Cached output of the latest forward pass (None initially).
        """
        super().__init__()
        self.parameters = {
            "a1": float(a1),
            "c1": float(c1),
            "a2": float(a2),
            "c2": float(c2),
        }
        self.gradients = dict.fromkeys(self.parameters, 0.0)
        self.last_input = None
        self.last_output = None

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute y = s1(x) - s2(x).

        Args:
            x: Input array.

        Returns:
            np.ndarray: Membership values for the input.
        """
        x = np.asarray(x, dtype=float)
        self.last_input = x
        params = self.parameters

        def logistic(slope: float, center: float) -> np.ndarray:
            # Plain logistic curve evaluated on the cached input
            return 1.0 / (1.0 + np.exp(-slope * (x - center)))

        s1 = logistic(params["a1"], params["c1"])
        s2 = logistic(params["a2"], params["c2"])
        y = s1 - s2

        self.last_output = y
        self._s1, self._s2 = s1, s2  # cached for backward
        return y

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate parameter gradients from the upstream gradient.

        Using σ' = σ(1 - σ) for each sigmoid; since μ = s1 - s2, the second
        sigmoid's derivatives enter with a minus sign.

        Args:
            dL_dy: Gradient of the loss w.r.t. the output.

        Returns:
            None
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        dL_dy = np.asarray(dL_dy)
        params = self.parameters
        s1, s2 = self._s1, self._s2

        # σ'(z) for each sigmoid
        g1 = s1 * (1 - s1)
        g2 = s2 * (1 - s2)

        # μ = s1 - s2, so s2's contributions are negated
        self.gradients["a1"] += float(np.sum(dL_dy * ((x - params["c1"]) * g1)))
        self.gradients["c1"] += float(np.sum(dL_dy * (-params["a1"] * g1)))
        self.gradients["a2"] += float(np.sum(dL_dy * (-(x - params["c2"]) * g2)))
        self.gradients["c2"] += float(np.sum(dL_dy * (params["a2"] * g2)))

        # Input gradient (unused here, kept for reference):
        # dmu_dx = a1 * g1 - a2 * g2

999 

1000 

class ProdSigmoidalMF(MembershipFunction):
    """Product of two sigmoidal functions.

    Implements μ(x) = s1(x) * s2(x) with separate slope/center parameters
    for each sigmoid factor.
    """

    def __init__(self, a1: float, c1: float, a2: float, c2: float):
        """Initializes the membership function with the four sigmoid parameters.

        Args:
            a1 (float): Slope of the first sigmoid.
            c1 (float): Center of the first sigmoid.
            a2 (float): Slope of the second sigmoid.
            c2 (float): Center of the second sigmoid.

        Attributes:
            parameters (dict): Dictionary containing the membership function parameters.
            gradients (dict): Dictionary containing gradients for each parameter, initialized to 0.0.
            last_input: Stores the last input value (initialized to None).
            last_output: Stores the last output value (initialized to None).
        """
        super().__init__()
        self.parameters = {
            "a1": float(a1),
            "c1": float(c1),
            "a2": float(a2),
            "c2": float(c2),
        }
        self.gradients = dict.fromkeys(self.parameters, 0.0)
        self.last_input = None
        self.last_output = None

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute μ(x) = s1(x) * s2(x) for the given input.

        Args:
            x (np.ndarray): Input array to the membership function.

        Returns:
            np.ndarray: Membership values in [0, 1].
        """
        values = np.asarray(x, dtype=float)
        self.last_input = values
        p = self.parameters

        def sigmoid(slope: float, center: float) -> np.ndarray:
            return 1.0 / (1.0 + np.exp(-slope * (values - center)))

        first = sigmoid(p["a1"], p["c1"])
        second = sigmoid(p["a2"], p["c2"])
        product = first * second

        self.last_output = product
        self._s1, self._s2 = first, second  # cached for backward
        return product

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate parameter gradients using the product rule.

        Gradients are accumulated in-place into ``self.gradients``;
        nothing is returned.

        Args:
            dL_dy: Gradient of the loss w.r.t. the output.
        """
        if self.last_input is None or self.last_output is None:
            return

        xs = self.last_input
        upstream = np.asarray(dL_dy)
        p = self.parameters
        first, second = self._s1, self._s2

        # Logistic derivatives w.r.t. each factor's own parameters:
        # ds/da = (x - c) * s * (1 - s),  ds/dc = -a * s * (1 - s)
        ds1_da1 = (xs - p["c1"]) * first * (1 - first)
        ds1_dc1 = -p["a1"] * first * (1 - first)
        ds2_da2 = (xs - p["c2"]) * second * (1 - second)
        ds2_dc2 = -p["a2"] * second * (1 - second)

        # Product rule: ∂μ/∂θ1 = (∂s1/∂θ1) * s2 and ∂μ/∂θ2 = s1 * (∂s2/∂θ2)
        self.gradients["a1"] += float(np.sum(upstream * ds1_da1 * second))
        self.gradients["c1"] += float(np.sum(upstream * ds1_dc1 * second))
        self.gradients["a2"] += float(np.sum(upstream * first * ds2_da2))
        self.gradients["c2"] += float(np.sum(upstream * first * ds2_dc2))

        # Input gradient (not propagated here):
        # dmu_dx = a1 * s1 * (1 - s1) * s2 + a2 * s2 * (1 - s2) * s1

class SShapedMF(MembershipFunction):
    """S-shaped Membership Function.

    Smoothly rises from 0 to 1 between parameters a and b using the
    smoothstep polynomial S(t) = 3t² - 2t³. Commonly used in fuzzy logic
    for a gradual onset of membership.

    Definition with a < b:
        - μ(x) = 0                          for x ≤ a
        - μ(x) = 3t² - 2t³, t = (x-a)/(b-a), for a < x < b
        - μ(x) = 1                          for x ≥ b

    Parameters:
        a (float): Left foot (start of transition from 0).
        b (float): Right shoulder (end of transition at 1).

    Note:
        Requires a < b.
    """

    def __init__(self, a: float, b: float):
        """Initialize the membership function with foot a and shoulder b.

        Args:
            a (float): Left foot; must be less than b.
            b (float): Right shoulder; must be greater than a.

        Raises:
            ValueError: If a is not less than b.

        Attributes:
            parameters (dict): Dictionary containing 'a' and 'b' as floats.
            gradients (dict): Gradients for 'a' and 'b', initialized to 0.0.
        """
        super().__init__()

        if not (a < b):
            raise ValueError(f"Parameters must satisfy a < b, got a={a}, b={b}")

        self.parameters = {"a": float(a), "b": float(b)}
        self.gradients = {"a": 0.0, "b": 0.0}

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute S-shaped membership values."""
        x = np.asarray(x)
        self.last_input = x.copy()

        a, b = self.parameters["a"], self.parameters["b"]

        result = np.zeros_like(x, dtype=np.float64)
        result[x >= b] = 1.0  # fully saturated on the right

        rising = (x > a) & (x < b)
        if rising.any():
            t = (x[rising] - a) / (b - a)
            result[rising] = _smoothstep(t)
        # x ≤ a stays at 0

        self.last_output = result.copy()
        return result

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate gradients for a and b using analytical derivatives.

        Uses S(t) = 3t² - 2t³ with t = (x-a)/(b-a); the output is constant
        outside [a, b], so only the transition region contributes.
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        upstream = np.asarray(dL_dy)

        a, b = self.parameters["a"], self.parameters["b"]

        region = (x >= a) & (x <= b)
        if not (np.any(region) and b != a):
            return

        span = b - a
        xt = x[region]
        t = (xt - a) / span
        slope = _dsmoothstep_dt(t)  # dS/dt = 6t(1-t)

        # t = (x-a)/(b-a)  ⇒  ∂t/∂a = (x-b)/span², ∂t/∂b = -(x-a)/span²
        self.gradients["a"] += float(np.sum(upstream[region] * slope * (xt - b) / span**2))
        self.gradients["b"] += float(np.sum(upstream[region] * slope * -(xt - a) / span**2))

class LinSShapedMF(MembershipFunction):
    """Linear S-shaped saturation Membership Function.

    Piecewise linear ramp from 0 to 1 between parameters a and b:
        - μ(x) = 0                  for x ≤ a
        - μ(x) = (x - a) / (b - a)  for a < x < b
        - μ(x) = 1                  for x ≥ b

    Parameters:
        a (float): Left foot (start of transition from 0).
        b (float): Right shoulder (end of transition at 1). Requires a < b.
    """

    def __init__(self, a: float, b: float):
        """Initialize the membership function with foot a and shoulder b.

        Args:
            a (float): Left foot; must be less than b.
            b (float): Right shoulder; must be greater than a.

        Raises:
            ValueError: If a is not less than b.

        Attributes:
            parameters (dict): Dictionary containing 'a' and 'b' as floats.
            gradients (dict): Gradients for 'a' and 'b', initialized to 0.0.
        """
        super().__init__()
        if not (a < b):
            raise ValueError(f"Parameters must satisfy a < b, got a={a}, b={b}")
        self.parameters = {"a": float(a), "b": float(b)}
        self.gradients = {"a": 0.0, "b": 0.0}

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute linear S-shaped membership values for x.

        Rules:
            - x >= b: 1.0 (right saturated)
            - a < x < b: linear ramp from 0 to 1
            - x <= a: 0.0

        Args:
            x: Input array of values.

        Returns:
            np.ndarray: Output array with membership values.
        """
        x = np.asarray(x, dtype=float)
        self.last_input = x
        a, b = self.parameters["a"], self.parameters["b"]

        y = np.zeros_like(x, dtype=float)
        y[x >= b] = 1.0  # saturated at 1 on the right

        ramp = (x > a) & (x < b)
        if ramp.any():
            y[ramp] = (x[ramp] - a) / (b - a)
        # x ≤ a stays at 0

        self.last_output = y
        return y

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate gradients for 'a' and 'b' over the ramp region.

        The output is constant outside (a, b), so only the ramp contributes.

        Args:
            dL_dy: Gradient of the loss w.r.t. the output.

        Returns:
            None
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        upstream = np.asarray(dL_dy)
        a, b = self.parameters["a"], self.parameters["b"]
        width = b - a
        if width == 0:
            return

        ramp = (x > a) & (x < b)
        if not ramp.any():
            return

        xr = x[ramp]
        gr = upstream[ramp]
        # μ = (x - a) / width:
        #   ∂μ/∂a = -(1/width) + (x - a)/width²
        #   ∂μ/∂b = -(x - a)/width²
        self.gradients["a"] += float(np.sum(gr * ((xr - a) / (width * width) - 1.0 / width)))
        self.gradients["b"] += float(np.sum(gr * (-(xr - a) / (width * width))))

class ZShapedMF(MembershipFunction):
    """Z-shaped Membership Function.

    Smoothly falls from 1 to 0 between parameters a and b via the smoothstep
    polynomial S(t) = 3t² - 2t³ (Z = 1 - S). Commonly used in fuzzy logic as
    the complement of the S-shaped function.

    Definition with a < b:
        - μ(x) = 1                                    for x ≤ a
        - μ(x) = 1 - (3t² - 2t³), t = (x-a)/(b-a),    for a < x < b
        - μ(x) = 0                                    for x ≥ b

    Parameters:
        a (float): Left shoulder (start of transition).
        b (float): Right foot (end of transition).

    Note:
        Requires a < b. In the degenerate case a == b, the function becomes
        an instantaneous drop at x = a.
    """

    def __init__(self, a: float, b: float):
        """Initialize the membership function with shoulder a and foot b.

        Args:
            a: Lower bound parameter.
            b: Upper bound parameter.

        Raises:
            ValueError: If a is not less than b.
        """
        super().__init__()

        if not (a < b):
            raise ValueError(f"Parameters must satisfy a < b, got a={a}, b={b}")

        self.parameters = {"a": float(a), "b": float(b)}
        self.gradients = {"a": 0.0, "b": 0.0}

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute Z-shaped membership values."""
        x = np.asarray(x)
        self.last_input = x.copy()

        a, b = self.parameters["a"], self.parameters["b"]

        result = np.zeros_like(x, dtype=np.float64)
        result[x <= a] = 1.0  # fully saturated on the left

        falling = (x > a) & (x < b)
        if falling.any():
            t = (x[falling] - a) / (b - a)
            result[falling] = 1.0 - _smoothstep(t)
        # x ≥ b stays at 0

        self.last_output = result.copy()
        return result

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate gradients for a and b using analytical derivatives.

        Uses Z(t) = 1 - (3t² - 2t³) with t = (x-a)/(b-a); the output is
        constant outside [a, b], so only the transition region contributes.
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        upstream = np.asarray(dL_dy)

        a, b = self.parameters["a"], self.parameters["b"]

        region = (x >= a) & (x <= b)
        if not (np.any(region) and b != a):
            return

        span = b - a
        xt = x[region]
        t = (xt - a) / span
        slope = -_dsmoothstep_dt(t)  # dZ/dt = -dS/dt = 6t(t-1)

        # t = (x-a)/(b-a)  ⇒  ∂t/∂a = (x-b)/span², ∂t/∂b = -(x-a)/span²
        self.gradients["a"] += float(np.sum(upstream[region] * slope * (xt - b) / span**2))
        self.gradients["b"] += float(np.sum(upstream[region] * slope * -(xt - a) / span**2))

class LinZShapedMF(MembershipFunction):
    """Linear Z-shaped saturation Membership Function.

    Piecewise linear ramp from 1 down to 0 between parameters a and b:
        - μ(x) = 1                  for x ≤ a
        - μ(x) = (b - x) / (b - a)  for a < x < b
        - μ(x) = 0                  for x ≥ b

    Parameters:
        a (float): Left shoulder (end of saturation at 1).
        b (float): Right foot (end of transition to 0). Requires a < b.
    """

    def __init__(self, a: float, b: float):
        """Initialize the membership function with shoulder a and foot b.

        Args:
            a (float): Left shoulder; must be less than b.
            b (float): Right foot.

        Raises:
            ValueError: If a is not less than b.

        Attributes:
            parameters (dict): Dictionary containing 'a' and 'b' as floats.
            gradients (dict): Gradients for 'a' and 'b', initialized to 0.0.
        """
        super().__init__()
        if not (a < b):
            raise ValueError(f"Parameters must satisfy a < b, got a={a}, b={b}")
        self.parameters = {"a": float(a), "b": float(b)}
        self.gradients = {"a": 0.0, "b": 0.0}

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute linear Z-shaped membership values for x.

        Rules:
            - x <= a: 1.0 (left saturated)
            - a < x < b: linear ramp from 1 to 0
            - x >= b: 0.0

        Args:
            x: Input array of values.

        Returns:
            np.ndarray: Output membership values for each input.
        """
        x = np.asarray(x, dtype=float)
        self.last_input = x
        a, b = self.parameters["a"], self.parameters["b"]

        y = np.zeros_like(x, dtype=float)
        y[x <= a] = 1.0  # saturated at 1 on the left

        ramp = (x > a) & (x < b)
        if ramp.any():
            y[ramp] = (b - x[ramp]) / (b - a)
        # x ≥ b stays at 0

        self.last_output = y
        return y

    def backward(self, dL_dy: np.ndarray) -> None:
        """Accumulate gradients for 'a' and 'b' over the ramp region.

        The output is constant outside (a, b), so only the ramp contributes.
        Gradients are accumulated in-place; nothing is returned.

        Args:
            dL_dy: Gradient of the loss w.r.t. the output.

        Returns:
            None
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        upstream = np.asarray(dL_dy)
        a, b = self.parameters["a"], self.parameters["b"]
        width = b - a
        if width == 0:
            return

        ramp = (x > a) & (x < b)
        if not ramp.any():
            return

        xr = x[ramp]
        gr = upstream[ramp]
        # μ = (b - x) / width:
        #   ∂μ/∂a = (b - x)/width²
        #   ∂μ/∂b = (x - a)/width²
        self.gradients["a"] += float(np.sum(gr * (b - xr) / (width * width)))
        self.gradients["b"] += float(np.sum(gr * (xr - a) / (width * width)))

class PiMF(MembershipFunction):
    """Pi-shaped membership function.

    The Pi-shaped membership function is characterized by a trapezoidal-like
    shape with smooth S-shaped transitions on both sides. It is defined by
    four parameters that control the shape and position:

    Mathematical definition:
        μ(x) = S(x; a, b)   for x ∈ [a, b]
             = 1            for x ∈ [b, c]
             = Z(x; c, d)   for x ∈ [c, d]
             = 0            elsewhere

    Both edges use the cubic smoothstep polynomial, which is continuous and
    differentiable across the whole transition interval:
        S(x; a, b) = 3t² - 2t³  with t = (x - a) / (b - a)
        Z(x; c, d) = 1 - S(x; c, d)

    Parameters:
        a (float): Left foot of the function (where function starts rising from 0)
        b (float): Left shoulder of the function (where function reaches 1)
        c (float): Right shoulder of the function (where function starts falling from 1)
        d (float): Right foot of the function (where function reaches 0)

    Note:
        Parameters must satisfy: a < b ≤ c < d
    """

    def __init__(self, a: float, b: float, c: float, d: float):
        """Initialize the Pi-shaped membership function.

        Args:
            a: Left foot parameter.
            b: Left shoulder parameter.
            c: Right shoulder parameter.
            d: Right foot parameter.

        Raises:
            ValueError: If parameters don't satisfy a < b ≤ c < d.
        """
        super().__init__()

        # Parameter validation guarantees b > a and d > c below.
        if not (a < b <= c < d):
            raise ValueError(f"Parameters must satisfy a < b ≤ c < d, got a={a}, b={b}, c={c}, d={d}")

        self.parameters = {"a": float(a), "b": float(b), "c": float(c), "d": float(d)}
        self.gradients = {"a": 0.0, "b": 0.0, "c": 0.0, "d": 0.0}

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Compute the Pi-shaped membership function.

        Combines S and Z edges for smooth transitions:
            - Rising edge: S-function (smoothstep) from a to b
            - Flat top: constant 1 from b to c
            - Falling edge: Z-function (inverted smoothstep) from c to d
            - Outside: 0

        Args:
            x: Input values.

        Returns:
            np.ndarray: Membership values μ(x) ∈ [0, 1].
        """
        x = np.asarray(x)
        self.last_input = x.copy()

        a, b, c, d = self.parameters["a"], self.parameters["b"], self.parameters["c"], self.parameters["d"]

        y = np.zeros_like(x, dtype=np.float64)

        # Rising S-edge on [a, b]
        mask_s = (x >= a) & (x <= b)
        if np.any(mask_s):
            # b != a is guaranteed by __init__ validation; the degenerate
            # branch is kept defensively in case parameters are mutated.
            if b != a:
                t = (x[mask_s] - a) / (b - a)  # normalize to [0, 1]
                y[mask_s] = _smoothstep(t)
            else:
                y[mask_s] = 1.0  # instant transition

        # Flat top on [b, c]: μ(x) = 1 (also overwrites x == b from the S-edge)
        mask_flat = (x >= b) & (x <= c)
        y[mask_flat] = 1.0

        # Falling Z-edge on [c, d]
        mask_z = (x >= c) & (x <= d)
        if np.any(mask_z):
            if d != c:
                t = (x[mask_z] - c) / (d - c)  # normalize to [0, 1]
                y[mask_z] = 1 - _smoothstep(t)
            else:
                y[mask_z] = 0.0  # instant transition

        self.last_output = y.copy()
        return y

    def backward(self, dL_dy: np.ndarray) -> None:
        """Compute gradients for backpropagation.

        Analytical gradients are computed by region:
            - S-edge [a, b]: gradients w.r.t. a, b
            - Z-edge [c, d]: gradients w.r.t. c, d
            - Flat top and outside: no gradients

        Gradients are accumulated in-place into ``self.gradients`` as plain
        Python floats (consistent with the other membership functions).

        Args:
            dL_dy: Gradient of loss w.r.t. function output.
        """
        if self.last_input is None or self.last_output is None:
            return

        x = self.last_input
        dL_dy = np.asarray(dL_dy)

        a, b, c, d = self.parameters["a"], self.parameters["b"], self.parameters["c"], self.parameters["d"]

        grad_a = grad_b = grad_c = grad_d = 0.0

        # S-edge gradients on [a, b] (endpoints contribute zero: dS/dt = 0 there)
        mask_s = (x >= a) & (x <= b)
        if np.any(mask_s) and b != a:
            x_s = x[mask_s]
            dL_dy_s = dL_dy[mask_s]
            t = (x_s - a) / (b - a)

            # t = (x-a)/(b-a)  ⇒  ∂t/∂a = (x-b)/(b-a)², ∂t/∂b = -(x-a)/(b-a)²
            dt_da = (x_s - b) / (b - a) ** 2
            dt_db = -(x_s - a) / (b - a) ** 2

            # dS/dt = 6t(1-t)
            dS_dt = _dsmoothstep_dt(t)

            # Chain rule, cast to float so gradients stay plain Python floats.
            grad_a += float(np.sum(dL_dy_s * dS_dt * dt_da))
            grad_b += float(np.sum(dL_dy_s * dS_dt * dt_db))

        # Z-edge gradients on [c, d]
        mask_z = (x >= c) & (x <= d)
        if np.any(mask_z) and d != c:
            x_z = x[mask_z]
            dL_dy_z = dL_dy[mask_z]
            t = (x_z - c) / (d - c)

            dt_dc = (x_z - d) / (d - c) ** 2
            dt_dd = -(x_z - c) / (d - c) ** 2

            # Z(t) = 1 - S(t)  ⇒  dZ/dt = -6t(1-t)
            dZ_dt = -_dsmoothstep_dt(t)

            grad_c += float(np.sum(dL_dy_z * dZ_dt * dt_dc))
            grad_d += float(np.sum(dL_dy_z * dZ_dt * dt_dd))

        # Accumulate gradients
        self.gradients["a"] += grad_a
        self.gradients["b"] += grad_b
        self.gradients["c"] += grad_c
        self.gradients["d"] += grad_d