guohanghui commited on
Commit
b5fa6f2
·
verified ·
1 Parent(s): cef0ac2

Update GSTools/mcp_output/mcp_plugin/mcp_service.py

Browse files
GSTools/mcp_output/mcp_plugin/mcp_service.py CHANGED
@@ -1,154 +1,1206 @@
 
 
 
 
 
 
 
 
 
1
  from fastmcp import FastMCP
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  # Create the FastMCP service application
4
  mcp = FastMCP("gstools_service")
5
 
6
- @mcp.tool(name="list_available_tools", description="List all available GSTools functionalities")
7
- def list_available_tools() -> dict:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  """
9
- List all available functionalities in GSTools.
10
 
11
  Returns:
12
- - dict: A dictionary with success status and list of available tools.
13
  """
14
  try:
15
- tools = [
16
- "random_field",
17
- "cov_model",
18
- "variogram",
19
- "vector_field",
20
- "kriging",
21
- "conditioned_fields",
22
- "transformations",
23
- "geo_coordinates",
24
- "spatio_temporal",
25
- "normalizer",
26
- "plurigaussian",
27
- "sum_model"
28
- ]
29
- return {
30
- "success": True,
31
- "tools": tools,
32
- "count": len(tools)
33
  }
 
34
  except Exception as e:
35
- return {"success": False, "error": str(e)}
36
 
37
- @mcp.tool(name="list_covariance_models", description="List all available covariance models in GSTools")
38
- def list_covariance_models() -> dict:
 
 
 
 
 
 
 
 
 
39
  """
40
- List all available covariance models in GSTools.
 
 
 
 
 
 
 
 
 
41
 
42
  Returns:
43
- - dict: A dictionary with success status and list of covariance models.
44
  """
45
  try:
46
- models = [
47
- "Nugget",
48
- "Gaussian",
49
- "Exponential",
50
- "Matern",
51
- "Integral",
52
- "Stable",
53
- "Rational",
54
- "Cubic",
55
- "Linear",
56
- "Circular",
57
- "Spherical",
58
- "HyperSpherical",
59
- "SuperSpherical",
60
- "JBessel"
61
- ]
62
- return {
63
- "success": True,
64
- "models": models,
65
- "count": len(models)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  }
 
 
 
 
 
 
 
 
67
  except Exception as e:
68
- return {"success": False, "error": str(e)}
69
 
70
- @mcp.tool(name="generate_random_field", description="Generate a spatial random field using GSTools")
71
- def generate_random_field(model_name: str, size: int, seed: int = None) -> dict:
 
 
 
 
 
 
 
 
72
  """
73
- Generate a spatial random field using a specified covariance model.
74
 
75
  Parameters:
76
- - model_name: Name of the covariance model (e.g., 'Gaussian', 'Exponential')
77
- - size: Size of the random field (e.g., 100 for a 100x100 field)
78
- - seed: Random seed for reproducibility (optional)
 
 
 
79
 
80
  Returns:
81
- - dict: Information about the generated random field
82
  """
83
  try:
84
- from gstools import SRF, Gaussian, Exponential, Matern
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
- model_map = {
87
- "Gaussian": Gaussian,
88
- "Exponential": Exponential,
89
- "Matern": Matern
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
- if model_name not in model_map:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  return {
94
  "success": False,
95
- "error": f"Model '{model_name}' not found. Use list_covariance_models to see available options."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
- model = model_map[model_name](dim=2, var=1, len_scale=10)
99
- srf = SRF(model, seed=seed)
100
- field = srf((size, size))
 
 
 
 
101
 
102
- return {
103
- "success": True,
104
- "model_name": model_name,
105
- "field_shape": field.shape,
106
- "field": field.tolist()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  }
 
108
  except Exception as e:
109
- return {"success": False, "error": str(e)}
 
110
 
111
- @mcp.tool(name="fit_variogram", description="Fit a variogram model to data")
112
- def fit_variogram(data: list, model_name: str) -> dict:
 
 
 
 
 
113
  """
114
- Fit a variogram model to the given data.
115
 
116
  Parameters:
117
- - data: List of data points to fit the variogram model
118
- - model_name: Name of the variogram model (e.g., 'Gaussian', 'Exponential')
 
 
119
 
120
  Returns:
121
- - dict: Fitted variogram model parameters
122
  """
123
  try:
124
- from gstools.covmodel import fit_variogram
125
- from gstools.covmodel.models import Gaussian, Exponential, Matern
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
 
127
- model_map = {
128
- "Gaussian": Gaussian,
129
- "Exponential": Exponential,
130
- "Matern": Matern
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
- if model_name not in model_map:
 
 
 
 
 
134
  return {
135
  "success": False,
136
- "error": f"Model '{model_name}' not found. Use list_covariance_models to see available options."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  }
 
 
 
 
138
 
139
- model = model_map[model_name]()
140
- params = fit_variogram(data, model)
141
 
142
- return {
143
- "success": True,
144
- "model_name": model_name,
145
- "fitted_parameters": params
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  }
 
147
  except Exception as e:
148
- return {"success": False, "error": str(e)}
149
 
150
- # Add more tools here following the same pattern as above
151
- # Each tool should wrap a core functionality of GSTools
152
 
153
- if __name__ == "__main__":
154
- mcp.run()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ from typing import List, Dict, Any, Optional
4
+
5
+ # Path settings to include the local source directory
6
+ source_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "source", "src")
7
+ if source_path not in sys.path:
8
+ sys.path.insert(0, source_path)
9
+
10
  from fastmcp import FastMCP
11
+ import numpy as np
12
+
13
+ import gstools as gs
14
+ from gstools import (
15
+ Gaussian, Exponential, Matern, Spherical, Linear,
16
+ Stable, Rational, Cubic, Circular, Nugget,
17
+ SRF, CondSRF, vario_estimate, standard_bins, Krige,
18
+ generate_grid, rotated_main_axes
19
+ )
20
+ from gstools.transform import (
21
+ normal_to_lognormal, normal_to_uniform,
22
+ boxcox, zinnharvey, binary, discrete
23
+ )
24
+ from gstools.normalizer import LogNormal, BoxCox, YeoJohnson
25
 
26
  # Create the FastMCP service application
27
  mcp = FastMCP("gstools_service")
28
 
29
+ # Mapping of model names to classes
30
+ COVARIANCE_MODELS = {
31
+ "gaussian": Gaussian,
32
+ "exponential": Exponential,
33
+ "matern": Matern,
34
+ "spherical": Spherical,
35
+ "linear": Linear,
36
+ "stable": Stable,
37
+ "rational": Rational,
38
+ "cubic": Cubic,
39
+ "circular": Circular,
40
+ "nugget": Nugget,
41
+ }
42
+
43
+
44
+ # ===================== Covariance Model Tools =====================
45
+
46
+ @mcp.tool(name="list_covariance_models", description="List available covariance model types")
47
+ def list_covariance_models() -> dict:
48
  """
49
+ List all available covariance model types in GSTools.
50
 
51
  Returns:
52
+ - dict: Available model types and their descriptions.
53
  """
54
  try:
55
+ result = {
56
+ "gaussian": "Gaussian covariance model - smooth random fields",
57
+ "exponential": "Exponential covariance model - rough random fields",
58
+ "matern": "Matérn covariance model - adjustable smoothness via nu parameter",
59
+ "spherical": "Spherical covariance model - linear near origin, finite range",
60
+ "linear": "Linear covariance model - simple linear decrease",
61
+ "stable": "Stable covariance model - generalization of Gaussian",
62
+ "rational": "Rational quadratic covariance model",
63
+ "cubic": "Cubic covariance model - smooth near origin",
64
+ "circular": "Circular covariance model",
65
+ "nugget": "Nugget effect model - pure discontinuity at origin",
 
 
 
 
 
 
 
66
  }
67
+ return {"success": True, "result": result, "error": None}
68
  except Exception as e:
69
+ return {"success": False, "result": None, "error": str(e)}
70
 
71
+
72
+ @mcp.tool(name="create_covariance_model", description="Create a covariance model with specified parameters")
73
+ def create_covariance_model(
74
+ model_type: str,
75
+ dim: int = 2,
76
+ var: float = 1.0,
77
+ len_scale: float = 10.0,
78
+ nugget: float = 0.0,
79
+ nu: Optional[float] = None,
80
+ alpha: Optional[float] = None
81
+ ) -> dict:
82
  """
83
+ Create a covariance model and return its properties.
84
+
85
+ Parameters:
86
+ - model_type (str): Type of covariance model (gaussian, exponential, matern, etc.)
87
+ - dim (int): Spatial dimension (default: 2)
88
+ - var (float): Variance/sill (default: 1.0)
89
+ - len_scale (float): Correlation length scale (default: 10.0)
90
+ - nugget (float): Nugget effect (default: 0.0)
91
+ - nu (float, optional): Smoothness parameter for Matérn model
92
+ - alpha (float, optional): Shape parameter for Stable model
93
 
94
  Returns:
95
+ - dict: Model properties including variance, length scale, integral scale, etc.
96
  """
97
  try:
98
+ model_type_lower = model_type.lower()
99
+ if model_type_lower not in COVARIANCE_MODELS:
100
+ return {
101
+ "success": False,
102
+ "result": None,
103
+ "error": f"Unknown model type: {model_type}. Available: {list(COVARIANCE_MODELS.keys())}"
104
+ }
105
+
106
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
107
+
108
+ # Build kwargs
109
+ kwargs = {
110
+ "dim": dim,
111
+ "var": var,
112
+ "len_scale": len_scale,
113
+ "nugget": nugget,
114
+ }
115
+
116
+ # Add optional parameters for specific models
117
+ if model_type_lower == "matern" and nu is not None:
118
+ kwargs["nu"] = nu
119
+ if model_type_lower == "stable" and alpha is not None:
120
+ kwargs["alpha"] = alpha
121
+
122
+ model = ModelClass(**kwargs)
123
+
124
+ result = {
125
+ "model_type": model_type,
126
+ "dim": model.dim,
127
+ "var": model.var,
128
+ "len_scale": float(model.len_scale),
129
+ "nugget": model.nugget,
130
+ "sill": model.sill,
131
+ "integral_scale": float(model.integral_scale),
132
+ "hankel_kw": model.hankel_kw,
133
  }
134
+
135
+ # Add model-specific parameters
136
+ if hasattr(model, "nu"):
137
+ result["nu"] = model.nu
138
+ if hasattr(model, "alpha"):
139
+ result["alpha"] = model.alpha
140
+
141
+ return {"success": True, "result": result, "error": None}
142
  except Exception as e:
143
+ return {"success": False, "result": None, "error": str(e)}
144
 
145
+
146
+ @mcp.tool(name="evaluate_covariance", description="Evaluate covariance function at given distances")
147
+ def evaluate_covariance(
148
+ model_type: str,
149
+ distances: List[float],
150
+ var: float = 1.0,
151
+ len_scale: float = 10.0,
152
+ nugget: float = 0.0,
153
+ nu: Optional[float] = None
154
+ ) -> dict:
155
  """
156
+ Evaluate the covariance function at given distances.
157
 
158
  Parameters:
159
+ - model_type (str): Type of covariance model
160
+ - distances (List[float]): List of distances to evaluate
161
+ - var (float): Variance (default: 1.0)
162
+ - len_scale (float): Length scale (default: 10.0)
163
+ - nugget (float): Nugget effect (default: 0.0)
164
+ - nu (float, optional): Smoothness for Matérn model
165
 
166
  Returns:
167
+ - dict: Covariance and variogram values at each distance
168
  """
169
  try:
170
+ model_type_lower = model_type.lower()
171
+ if model_type_lower not in COVARIANCE_MODELS:
172
+ return {
173
+ "success": False,
174
+ "result": None,
175
+ "error": f"Unknown model type: {model_type}"
176
+ }
177
+
178
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
179
+ kwargs = {"var": var, "len_scale": len_scale, "nugget": nugget}
180
+ if model_type_lower == "matern" and nu is not None:
181
+ kwargs["nu"] = nu
182
+
183
+ model = ModelClass(**kwargs)
184
+
185
+ r = np.array(distances)
186
+ cov_values = model.covariance(r)
187
+ vario_values = model.variogram(r)
188
+
189
+ result = {
190
+ "distances": distances,
191
+ "covariance": cov_values.tolist(),
192
+ "variogram": vario_values.tolist(),
193
+ "model_type": model_type,
194
+ "var": var,
195
+ "len_scale": len_scale,
196
+ "nugget": nugget,
197
+ }
198
+ return {"success": True, "result": result, "error": None}
199
+ except Exception as e:
200
+ return {"success": False, "result": None, "error": str(e)}
201
+
202
+
203
+ # ===================== Random Field Generation =====================
204
 
205
+ @mcp.tool(name="generate_random_field_1d", description="Generate a 1D spatial random field")
206
+ def generate_random_field_1d(
207
+ model_type: str,
208
+ x_min: float,
209
+ x_max: float,
210
+ n_points: int,
211
+ var: float = 1.0,
212
+ len_scale: float = 10.0,
213
+ mean: float = 0.0,
214
+ seed: Optional[int] = None
215
+ ) -> dict:
216
+ """
217
+ Generate a 1D spatial random field.
218
+
219
+ Parameters:
220
+ - model_type (str): Type of covariance model
221
+ - x_min (float): Minimum x coordinate
222
+ - x_max (float): Maximum x coordinate
223
+ - n_points (int): Number of points
224
+ - var (float): Variance (default: 1.0)
225
+ - len_scale (float): Length scale (default: 10.0)
226
+ - mean (float): Mean of the field (default: 0.0)
227
+ - seed (int, optional): Random seed for reproducibility
228
+
229
+ Returns:
230
+ - dict: Generated field values and coordinates
231
+ """
232
+ try:
233
+ model_type_lower = model_type.lower()
234
+ if model_type_lower not in COVARIANCE_MODELS:
235
+ return {
236
+ "success": False,
237
+ "result": None,
238
+ "error": f"Unknown model type: {model_type}"
239
+ }
240
+
241
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
242
+ model = ModelClass(dim=1, var=var, len_scale=len_scale)
243
+
244
+ srf = SRF(model, mean=mean)
245
+ x = np.linspace(x_min, x_max, n_points)
246
+
247
+ if seed is not None:
248
+ field = srf((x,), seed=seed)
249
+ else:
250
+ field = srf((x,))
251
+
252
+ result = {
253
+ "x": x.tolist(),
254
+ "field": field.tolist(),
255
+ "model_type": model_type,
256
+ "var": var,
257
+ "len_scale": len_scale,
258
+ "mean": mean,
259
+ "seed": seed,
260
+ "n_points": n_points,
261
+ "field_stats": {
262
+ "min": float(np.min(field)),
263
+ "max": float(np.max(field)),
264
+ "mean": float(np.mean(field)),
265
+ "std": float(np.std(field)),
266
+ }
267
  }
268
+ return {"success": True, "result": result, "error": None}
269
+ except Exception as e:
270
+ return {"success": False, "result": None, "error": str(e)}
271
+
272
+
273
+ @mcp.tool(name="generate_random_field_2d", description="Generate a 2D spatial random field")
274
+ def generate_random_field_2d(
275
+ model_type: str,
276
+ x_min: float,
277
+ x_max: float,
278
+ y_min: float,
279
+ y_max: float,
280
+ nx: int,
281
+ ny: int,
282
+ var: float = 1.0,
283
+ len_scale: float = 10.0,
284
+ mean: float = 0.0,
285
+ seed: Optional[int] = None
286
+ ) -> dict:
287
+ """
288
+ Generate a 2D spatial random field on a structured grid.
289
 
290
+ Parameters:
291
+ - model_type (str): Type of covariance model
292
+ - x_min, x_max (float): X coordinate range
293
+ - y_min, y_max (float): Y coordinate range
294
+ - nx, ny (int): Number of grid points in x and y
295
+ - var (float): Variance (default: 1.0)
296
+ - len_scale (float): Length scale (default: 10.0)
297
+ - mean (float): Mean of the field (default: 0.0)
298
+ - seed (int, optional): Random seed
299
+
300
+ Returns:
301
+ - dict: Generated field as 2D array and grid info
302
+ """
303
+ try:
304
+ model_type_lower = model_type.lower()
305
+ if model_type_lower not in COVARIANCE_MODELS:
306
  return {
307
  "success": False,
308
+ "result": None,
309
+ "error": f"Unknown model type: {model_type}"
310
+ }
311
+
312
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
313
+ model = ModelClass(dim=2, var=var, len_scale=len_scale)
314
+
315
+ srf = SRF(model, mean=mean)
316
+ x = np.linspace(x_min, x_max, nx)
317
+ y = np.linspace(y_min, y_max, ny)
318
+
319
+ if seed is not None:
320
+ field = srf.structured((x, y), seed=seed)
321
+ else:
322
+ field = srf.structured((x, y))
323
+
324
+ result = {
325
+ "x": x.tolist(),
326
+ "y": y.tolist(),
327
+ "field": field.tolist(),
328
+ "shape": list(field.shape),
329
+ "model_type": model_type,
330
+ "var": var,
331
+ "len_scale": len_scale,
332
+ "mean": mean,
333
+ "seed": seed,
334
+ "field_stats": {
335
+ "min": float(np.min(field)),
336
+ "max": float(np.max(field)),
337
+ "mean": float(np.mean(field)),
338
+ "std": float(np.std(field)),
339
  }
340
+ }
341
+ return {"success": True, "result": result, "error": None}
342
+ except Exception as e:
343
+ return {"success": False, "result": None, "error": str(e)}
344
+
345
+
346
+ # ===================== Variogram Estimation =====================
347
+
348
+ @mcp.tool(name="estimate_variogram_from_data", description="Estimate empirical variogram from spatial data")
349
+ def estimate_variogram_from_data(
350
+ pos_x: List[float],
351
+ pos_y: List[float],
352
+ values: List[float],
353
+ n_bins: int = 10,
354
+ max_lag: Optional[float] = None,
355
+ estimator: str = "matheron"
356
+ ) -> dict:
357
+ """
358
+ Estimate an empirical variogram from spatial data.
359
 
360
+ Parameters:
361
+ - pos_x (List[float]): X coordinates of data points
362
+ - pos_y (List[float]): Y coordinates of data points
363
+ - values (List[float]): Values at each point
364
+ - n_bins (int): Number of lag bins (default: 10)
365
+ - max_lag (float, optional): Maximum lag distance
366
+ - estimator (str): Estimator type - 'matheron' or 'cressie' (default: 'matheron')
367
 
368
+ Returns:
369
+ - dict: Empirical variogram with bin centers and gamma values
370
+ """
371
+ try:
372
+ pos = np.array([pos_x, pos_y])
373
+ field = np.array(values)
374
+
375
+ # Determine max lag if not provided
376
+ if max_lag is None:
377
+ x_range = np.max(pos_x) - np.min(pos_x)
378
+ y_range = np.max(pos_y) - np.min(pos_y)
379
+ max_lag = 0.5 * np.sqrt(x_range**2 + y_range**2)
380
+
381
+ # Create bins
382
+ bin_edges = standard_bins(pos, max_lag=max_lag, bin_no=n_bins)
383
+
384
+ # Estimate variogram
385
+ bin_center, gamma = vario_estimate(
386
+ pos, field, bin_edges=bin_edges, estimator=estimator
387
+ )
388
+
389
+ result = {
390
+ "bin_center": bin_center.tolist(),
391
+ "gamma": gamma.tolist(),
392
+ "n_bins": n_bins,
393
+ "max_lag": max_lag,
394
+ "estimator": estimator,
395
+ "n_points": len(values),
396
  }
397
+ return {"success": True, "result": result, "error": None}
398
  except Exception as e:
399
+ return {"success": False, "result": None, "error": str(e)}
400
+
401
 
402
+ @mcp.tool(name="fit_variogram_model", description="Fit a covariance model to empirical variogram")
403
+ def fit_variogram_model(
404
+ bin_center: List[float],
405
+ gamma: List[float],
406
+ model_type: str = "gaussian",
407
+ nugget: bool = False
408
+ ) -> dict:
409
  """
410
+ Fit a covariance model to an empirical variogram.
411
 
412
  Parameters:
413
+ - bin_center (List[float]): Lag distances (bin centers)
414
+ - gamma (List[float]): Empirical variogram values
415
+ - model_type (str): Model type to fit (default: 'gaussian')
416
+ - nugget (bool): Whether to fit nugget effect (default: False)
417
 
418
  Returns:
419
+ - dict: Fitted model parameters
420
  """
421
  try:
422
+ model_type_lower = model_type.lower()
423
+ if model_type_lower not in COVARIANCE_MODELS:
424
+ return {
425
+ "success": False,
426
+ "result": None,
427
+ "error": f"Unknown model type: {model_type}"
428
+ }
429
+
430
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
431
+ model = ModelClass(dim=2)
432
+
433
+ bin_center_arr = np.array(bin_center)
434
+ gamma_arr = np.array(gamma)
435
+
436
+ # Fit the model
437
+ model.fit_variogram(bin_center_arr, gamma_arr, nugget=nugget)
438
+
439
+ result = {
440
+ "model_type": model_type,
441
+ "var": model.var,
442
+ "len_scale": float(model.len_scale),
443
+ "nugget": model.nugget,
444
+ "sill": model.sill,
445
+ "integral_scale": float(model.integral_scale),
446
+ }
447
+
448
+ # Add model-specific parameters
449
+ if hasattr(model, "nu"):
450
+ result["nu"] = model.nu
451
+
452
+ return {"success": True, "result": result, "error": None}
453
+ except Exception as e:
454
+ return {"success": False, "result": None, "error": str(e)}
455
 
456
+
457
+ # ===================== Kriging =====================
458
+
459
+ @mcp.tool(name="simple_kriging", description="Perform simple kriging interpolation")
460
+ def simple_kriging(
461
+ cond_pos_x: List[float],
462
+ cond_pos_y: List[float],
463
+ cond_values: List[float],
464
+ target_pos_x: List[float],
465
+ target_pos_y: List[float],
466
+ model_type: str = "gaussian",
467
+ var: float = 1.0,
468
+ len_scale: float = 10.0,
469
+ mean: float = 0.0
470
+ ) -> dict:
471
+ """
472
+ Perform simple kriging interpolation.
473
+
474
+ Parameters:
475
+ - cond_pos_x, cond_pos_y (List[float]): Conditioning point coordinates
476
+ - cond_values (List[float]): Values at conditioning points
477
+ - target_pos_x, target_pos_y (List[float]): Target point coordinates
478
+ - model_type (str): Covariance model type (default: 'gaussian')
479
+ - var (float): Variance (default: 1.0)
480
+ - len_scale (float): Length scale (default: 10.0)
481
+ - mean (float): Known mean for simple kriging (default: 0.0)
482
+
483
+ Returns:
484
+ - dict: Kriging predictions and variances
485
+ """
486
+ try:
487
+ model_type_lower = model_type.lower()
488
+ if model_type_lower not in COVARIANCE_MODELS:
489
+ return {
490
+ "success": False,
491
+ "result": None,
492
+ "error": f"Unknown model type: {model_type}"
493
+ }
494
+
495
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
496
+ model = ModelClass(dim=2, var=var, len_scale=len_scale)
497
+
498
+ cond_pos = np.array([cond_pos_x, cond_pos_y])
499
+ cond_val = np.array(cond_values)
500
+ target_pos = np.array([target_pos_x, target_pos_y])
501
+
502
+ krige = Krige(model, cond_pos=cond_pos, cond_val=cond_val, mean=mean)
503
+ predictions, variances = krige(target_pos)
504
+
505
+ result = {
506
+ "predictions": predictions.tolist(),
507
+ "variances": variances.tolist(),
508
+ "target_x": target_pos_x,
509
+ "target_y": target_pos_y,
510
+ "n_cond_points": len(cond_values),
511
+ "n_target_points": len(target_pos_x),
512
+ "model_type": model_type,
513
+ "var": var,
514
+ "len_scale": len_scale,
515
+ "mean": mean,
516
  }
517
+ return {"success": True, "result": result, "error": None}
518
+ except Exception as e:
519
+ return {"success": False, "result": None, "error": str(e)}
520
+
521
+
522
+ @mcp.tool(name="ordinary_kriging", description="Perform ordinary kriging interpolation")
523
+ def ordinary_kriging(
524
+ cond_pos_x: List[float],
525
+ cond_pos_y: List[float],
526
+ cond_values: List[float],
527
+ target_pos_x: List[float],
528
+ target_pos_y: List[float],
529
+ model_type: str = "gaussian",
530
+ var: float = 1.0,
531
+ len_scale: float = 10.0
532
+ ) -> dict:
533
+ """
534
+ Perform ordinary kriging interpolation (unknown mean).
535
+
536
+ Parameters:
537
+ - cond_pos_x, cond_pos_y (List[float]): Conditioning point coordinates
538
+ - cond_values (List[float]): Values at conditioning points
539
+ - target_pos_x, target_pos_y (List[float]): Target point coordinates
540
+ - model_type (str): Covariance model type (default: 'gaussian')
541
+ - var (float): Variance (default: 1.0)
542
+ - len_scale (float): Length scale (default: 10.0)
543
 
544
+ Returns:
545
+ - dict: Kriging predictions and variances
546
+ """
547
+ try:
548
+ model_type_lower = model_type.lower()
549
+ if model_type_lower not in COVARIANCE_MODELS:
550
  return {
551
  "success": False,
552
+ "result": None,
553
+ "error": f"Unknown model type: {model_type}"
554
+ }
555
+
556
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
557
+ model = ModelClass(dim=2, var=var, len_scale=len_scale)
558
+
559
+ cond_pos = np.array([cond_pos_x, cond_pos_y])
560
+ cond_val = np.array(cond_values)
561
+ target_pos = np.array([target_pos_x, target_pos_y])
562
+
563
+ # Ordinary kriging uses unbiased=True
564
+ krige = Krige(model, cond_pos=cond_pos, cond_val=cond_val, unbiased=True)
565
+ predictions, variances = krige(target_pos)
566
+
567
+ result = {
568
+ "predictions": predictions.tolist(),
569
+ "variances": variances.tolist(),
570
+ "target_x": target_pos_x,
571
+ "target_y": target_pos_y,
572
+ "n_cond_points": len(cond_values),
573
+ "n_target_points": len(target_pos_x),
574
+ "model_type": model_type,
575
+ "var": var,
576
+ "len_scale": len_scale,
577
+ "estimated_mean": float(np.mean(cond_values)),
578
+ }
579
+ return {"success": True, "result": result, "error": None}
580
+ except Exception as e:
581
+ return {"success": False, "result": None, "error": str(e)}
582
+
583
+
584
+ # ===================== Utility Tools =====================
585
+
586
+ @mcp.tool(name="generate_standard_bins", description="Generate standard binning for variogram estimation")
587
+ def generate_standard_bins(
588
+ pos_x: List[float],
589
+ pos_y: List[float],
590
+ n_bins: int = 10,
591
+ max_lag: Optional[float] = None
592
+ ) -> dict:
593
+ """
594
+ Generate standard bin edges for variogram estimation.
595
+
596
+ Parameters:
597
+ - pos_x, pos_y (List[float]): Position coordinates
598
+ - n_bins (int): Number of bins (default: 10)
599
+ - max_lag (float, optional): Maximum lag distance
600
+
601
+ Returns:
602
+ - dict: Bin edges and bin centers
603
+ """
604
+ try:
605
+ pos = np.array([pos_x, pos_y])
606
+
607
+ if max_lag is None:
608
+ x_range = np.max(pos_x) - np.min(pos_x)
609
+ y_range = np.max(pos_y) - np.min(pos_y)
610
+ max_lag = 0.5 * np.sqrt(x_range**2 + y_range**2)
611
+
612
+ bin_edges = standard_bins(pos, max_lag=max_lag, bin_no=n_bins)
613
+ bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
614
+
615
+ result = {
616
+ "bin_edges": bin_edges.tolist(),
617
+ "bin_centers": bin_centers.tolist(),
618
+ "n_bins": n_bins,
619
+ "max_lag": max_lag,
620
+ }
621
+ return {"success": True, "result": result, "error": None}
622
+ except Exception as e:
623
+ return {"success": False, "result": None, "error": str(e)}
624
+
625
+
626
+ @mcp.tool(name="get_gstools_constants", description="Get useful constants from GSTools")
627
+ def get_gstools_constants() -> dict:
628
+ """
629
+ Get useful constants defined in GSTools for geographic computations.
630
+
631
+ Returns:
632
+ - dict: Earth radius and scale constants
633
+ """
634
+ try:
635
+ result = {
636
+ "EARTH_RADIUS": float(gs.EARTH_RADIUS),
637
+ "KM_SCALE": float(gs.KM_SCALE),
638
+ "DEGREE_SCALE": float(gs.DEGREE_SCALE),
639
+ "RADIAN_SCALE": float(gs.RADIAN_SCALE),
640
+ "description": {
641
+ "EARTH_RADIUS": "Earth radius in km (~6371)",
642
+ "KM_SCALE": "Scale factor for km on Earth surface",
643
+ "DEGREE_SCALE": "Scale factor for degrees on Earth surface",
644
+ "RADIAN_SCALE": "Scale factor for radians (1.0)",
645
  }
646
+ }
647
+ return {"success": True, "result": result, "error": None}
648
+ except Exception as e:
649
+ return {"success": False, "result": None, "error": str(e)}
650
 
 
 
651
 
652
+ # ===================== Conditioned Random Fields =====================
653
+
654
+ @mcp.tool(name="generate_conditioned_random_field_2d", description="Generate a conditioned 2D spatial random field")
655
+ def generate_conditioned_random_field_2d(
656
+ model_type: str,
657
+ cond_pos_x: List[float],
658
+ cond_pos_y: List[float],
659
+ cond_values: List[float],
660
+ x_min: float,
661
+ x_max: float,
662
+ y_min: float,
663
+ y_max: float,
664
+ nx: int,
665
+ ny: int,
666
+ var: float = 1.0,
667
+ len_scale: float = 10.0,
668
+ mean: float = 0.0,
669
+ seed: Optional[int] = None
670
+ ) -> dict:
671
+ """
672
+ Generate a 2D conditioned spatial random field (honors conditioning data).
673
+
674
+ Parameters:
675
+ - model_type (str): Type of covariance model
676
+ - cond_pos_x, cond_pos_y (List[float]): Conditioning point coordinates
677
+ - cond_values (List[float]): Values at conditioning points
678
+ - x_min, x_max, y_min, y_max (float): Field extent
679
+ - nx, ny (int): Number of grid points
680
+ - var (float): Variance (default: 1.0)
681
+ - len_scale (float): Length scale (default: 10.0)
682
+ - mean (float): Mean (default: 0.0)
683
+ - seed (int, optional): Random seed
684
+
685
+ Returns:
686
+ - dict: Conditioned field and grid info
687
+ """
688
+ try:
689
+ model_type_lower = model_type.lower()
690
+ if model_type_lower not in COVARIANCE_MODELS:
691
+ return {
692
+ "success": False,
693
+ "result": None,
694
+ "error": f"Unknown model type: {model_type}"
695
+ }
696
+
697
+ ModelClass = COVARIANCE_MODELS[model_type_lower]
698
+ model = ModelClass(dim=2, var=var, len_scale=len_scale)
699
+
700
+ cond_pos = np.array([cond_pos_x, cond_pos_y])
701
+ cond_val = np.array(cond_values)
702
+
703
+ # Create kriging object
704
+ krige = Krige(model, cond_pos=cond_pos, cond_val=cond_val, mean=mean)
705
+
706
+ # Create conditioned SRF
707
+ cond_srf = CondSRF(krige)
708
+
709
+ # Generate field
710
+ x = np.linspace(x_min, x_max, nx)
711
+ y = np.linspace(y_min, y_max, ny)
712
+
713
+ if seed is not None:
714
+ field = cond_srf.structured((x, y), seed=seed)
715
+ else:
716
+ field = cond_srf.structured((x, y))
717
+
718
+ result = {
719
+ "x": x.tolist(),
720
+ "y": y.tolist(),
721
+ "field": field.tolist(),
722
+ "shape": list(field.shape),
723
+ "cond_pos_x": cond_pos_x,
724
+ "cond_pos_y": cond_pos_y,
725
+ "cond_values": cond_values,
726
+ "n_cond_points": len(cond_values),
727
+ "model_type": model_type,
728
+ "var": var,
729
+ "len_scale": len_scale,
730
+ "mean": mean,
731
+ "seed": seed,
732
+ "field_stats": {
733
+ "min": float(np.min(field)),
734
+ "max": float(np.max(field)),
735
+ "mean": float(np.mean(field)),
736
+ "std": float(np.std(field)),
737
+ }
738
  }
739
+ return {"success": True, "result": result, "error": None}
740
  except Exception as e:
741
+ return {"success": False, "result": None, "error": str(e)}
742
 
 
 
743
 
744
+ # ===================== Field Transformations =====================
745
+
746
@mcp.tool(name="transform_to_lognormal", description="Transform normal field to lognormal")
def transform_to_lognormal(
    field_values: List[float],
    mean: float = 1.0,
    var: float = 1.0
) -> dict:
    """
    Map values of a normal field onto a lognormal distribution.

    Parameters:
    - field_values (List[float]): Normal field values
    - mean (float): Target lognormal mean (default: 1.0)
    - var (float): Target lognormal variance (default: 1.0)

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      transformed values together with the target and realized statistics.
    """
    try:
        samples = np.asarray(field_values)
        # Delegate the actual distribution mapping to GSTools.
        logn_field = normal_to_lognormal(samples, mean=mean, var=var)

        result = {
            "original_values": field_values,
            "transformed_values": logn_field.tolist(),
            "target_mean": mean,
            "target_var": var,
            # realized moments of the transformed field, as plain floats
            "actual_mean": float(np.mean(logn_field)),
            "actual_var": float(np.var(logn_field)),
            "actual_std": float(np.std(logn_field)),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
779
+
780
+
781
@mcp.tool(name="transform_to_uniform", description="Transform normal field to uniform distribution")
def transform_to_uniform(
    field_values: List[float],
    lower: float = 0.0,
    upper: float = 1.0
) -> dict:
    """
    Map a normal field onto a uniform distribution on [lower, upper].

    Parameters:
    - field_values (List[float]): Normal field values
    - lower (float): Lower bound (default: 0.0)
    - upper (float): Upper bound (default: 1.0)

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      transformed values and their realized min/max/mean.
    """
    try:
        samples = np.asarray(field_values)
        uniform_field = normal_to_uniform(samples, lower=lower, upper=upper)

        result = {
            "original_values": field_values,
            "transformed_values": uniform_field.tolist(),
            "lower": lower,
            "upper": upper,
            # realized range/center of the transformed field
            "actual_min": float(np.min(uniform_field)),
            "actual_max": float(np.max(uniform_field)),
            "actual_mean": float(np.mean(uniform_field)),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
814
+
815
+
816
@mcp.tool(name="transform_to_binary", description="Transform field to binary values")
def transform_to_binary(
    field_values: List[float],
    threshold: Optional[float] = None,
    upper_value: float = 1.0,
    lower_value: float = 0.0
) -> dict:
    """
    Threshold a field into two classes.

    Parameters:
    - field_values (List[float]): Field values
    - threshold (float, optional): Cut value; the field mean is used when None.
    - upper_value (float): Value assigned above the threshold (default: 1.0)
    - lower_value (float): Value assigned below the threshold (default: 0.0)

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      binarized values, the threshold used, per-class counts and the
      fraction of points in the upper class.
    """
    try:
        samples = np.asarray(field_values)

        # Default cut: the empirical mean of the input field.
        cut = np.mean(samples) if threshold is None else threshold

        binarized = binary(samples, threshold=cut,
                           upper=upper_value, lower=lower_value)

        count_hi = int(np.sum(binarized == upper_value))
        count_lo = int(np.sum(binarized == lower_value))

        result = {
            "original_values": field_values,
            "transformed_values": binarized.tolist(),
            "threshold": float(cut),
            "upper_value": upper_value,
            "lower_value": lower_value,
            "n_upper": count_hi,
            "n_lower": count_lo,
            "upper_ratio": float(count_hi / len(samples)),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
860
+
861
+
862
@mcp.tool(name="transform_to_discrete", description="Transform field to discrete classes")
def transform_to_discrete(
    field_values: List[float],
    thresholds: List[float],
    values: Optional[List[float]] = None
) -> dict:
    """
    Bin a field into discrete classes at the given thresholds.

    Parameters:
    - field_values (List[float]): Field values
    - thresholds (List[float]): Threshold values delimiting the bins
    - values (List[float], optional): Class values. Defaults to
      [0, 1, ..., len(thresholds)].

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      discretized field plus a per-class count map keyed by class value.
    """
    try:
        samples = np.asarray(field_values)
        cuts = np.asarray(thresholds)

        # Default class labels: one more label than there are thresholds.
        if values is None:
            values = list(range(len(thresholds) + 1))

        classed = discrete(samples, thresholds=cuts, values=values)

        # How many points landed in each class.
        class_counts = {
            float(label): int(np.sum(classed == label)) for label in values
        }

        result = {
            "original_values": field_values,
            "transformed_values": classed.tolist(),
            "thresholds": thresholds,
            "class_values": values,
            "n_classes": len(values),
            "class_counts": class_counts,
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
904
+
905
+
906
@mcp.tool(name="apply_boxcox_transform", description="Apply Box-Cox transformation")
def apply_boxcox_transform(
    field_values: List[float],
    lmbda: float = 1.0,
    shift: float = 0.0
) -> dict:
    """
    Apply the Box-Cox power transformation to a field.

    Parameters:
    - field_values (List[float]): Field values (must be positive)
    - lmbda (float): Box-Cox lambda parameter (default: 1.0)
    - shift (float): Shift added before transformation (default: 0.0)

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      transformed values plus mean/std summaries before and after.
    """

    def _stats(arr):
        # mean/std summary as plain floats (JSON friendly)
        return {
            "mean": float(np.mean(arr)),
            "std": float(np.std(arr)),
        }

    try:
        samples = np.asarray(field_values)
        powered = boxcox(samples, lmbda=lmbda, shift=shift)

        result = {
            "original_values": field_values,
            "transformed_values": powered.tolist(),
            "lambda": lmbda,
            "shift": shift,
            "original_stats": _stats(samples),
            "transformed_stats": _stats(powered),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
944
+
945
+
946
+ # ===================== Normalizers =====================
947
+
948
@mcp.tool(name="create_lognormal_normalizer", description="Create a LogNormal normalizer")
def create_lognormal_normalizer(
    mean: float = 1.0,
    var: float = 1.0
) -> dict:
    """
    Describe a LogNormal normalizer configuration.

    Parameters:
    - mean (float): Target lognormal mean (default: 1.0)
    - var (float): Target lognormal variance (default: 1.0)

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      normalizer type, its parameters and a short description.
    """
    try:
        # Instantiate the normalizer so any constructor error is reported
        # through the except path; the object itself is discarded and only
        # its configuration is returned to the client.
        LogNormal(mean=mean, var=var)

        result = {
            "normalizer_type": "LogNormal",
            "mean": mean,
            "var": var,
            "description": "Transforms normal fields to lognormal distribution"
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
975
+
976
+
977
+ # ===================== Grid Generation Tools =====================
978
+
979
@mcp.tool(name="generate_grid_2d", description="Generate a 2D structured grid")
def generate_grid_2d(
    x_min: float,
    x_max: float,
    y_min: float,
    y_max: float,
    nx: int,
    ny: int
) -> dict:
    """
    Build a 2D structured grid via GSTools generate_grid.

    Parameters:
    - x_min, x_max (float): X coordinate range
    - y_min, y_max (float): Y coordinate range
    - nx, ny (int): Number of grid points along each axis

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      axis coordinates, the grid point coordinates and grid metadata.
    """
    try:
        xs = np.linspace(x_min, x_max, nx)
        ys = np.linspace(y_min, y_max, ny)

        # GSTools builds the point coordinates from the axis vectors.
        mesh_x, mesh_y = generate_grid([xs, ys])

        result = {
            "x": xs.tolist(),
            "y": ys.tolist(),
            "grid_x": mesh_x.tolist(),
            "grid_y": mesh_y.tolist(),
            "nx": nx,
            "ny": ny,
            "n_points": nx * ny,
            "x_extent": [x_min, x_max],
            "y_extent": [y_min, y_max],
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
1019
+
1020
+
1021
@mcp.tool(name="calculate_rotated_axes", description="Calculate rotated main axes")
def calculate_rotated_axes(
    dim: int,
    angles: List[float]
) -> dict:
    """
    Compute the rotated main axes used by anisotropic models.

    Parameters:
    - dim (int): Spatial dimension (2 or 3)
    - angles (List[float]): Rotation angles in radians

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds the
      matrix returned by GSTools ``rotated_main_axes`` (reported under the
      key "rotation_matrix") plus the angles converted to degrees.
    """
    try:
        ang = np.asarray(angles)
        axes_matrix = rotated_main_axes(dim, ang)

        result = {
            "dim": dim,
            "angles_rad": angles,
            # same radians -> degrees conversion, reusing the array above
            "angles_deg": (ang * 180 / np.pi).tolist(),
            "rotation_matrix": axes_matrix.tolist(),
            "matrix_shape": list(axes_matrix.shape),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
1050
+
1051
+
1052
+ # ===================== Advanced Covariance Features =====================
1053
+
1054
@mcp.tool(name="fit_variogram_with_multiple_models", description="Fit and compare multiple variogram models")
def fit_variogram_with_multiple_models(
    bin_center: List[float],
    gamma: List[float],
    model_types: Optional[List[str]] = None,
    nugget: bool = False
) -> dict:
    """
    Fit multiple covariance models to an empirical variogram and compare them.

    Parameters:
    - bin_center (List[float]): Lag distances of the empirical variogram bins
    - gamma (List[float]): Empirical variogram values per bin
    - model_types (List[str], optional): Models to fit. If None, fits
      "gaussian", "exponential", "matern" and "spherical".
    - nugget (bool): Whether to fit a nugget effect (default: False)

    Returns:
    - dict: {"success", "result", "error"}; on success the result maps each
      model name to its fitted parameters, RMSE against the empirical values
      and fitted variogram, and names the best (lowest-RMSE) model.
    """
    try:
        if model_types is None:
            model_types = ["gaussian", "exponential", "matern", "spherical"]

        bin_center_arr = np.array(bin_center)
        gamma_arr = np.array(gamma)

        results = {}
        for model_type in model_types:
            model_type_lower = model_type.lower()
            # Skip unknown model names so one typo does not abort the
            # whole comparison.
            if model_type_lower not in COVARIANCE_MODELS:
                continue

            try:
                ModelClass = COVARIANCE_MODELS[model_type_lower]
                model = ModelClass(dim=2)
                model.fit_variogram(bin_center_arr, gamma_arr, nugget=nugget)

                # Goodness of fit: RMSE between empirical and fitted values.
                fitted_gamma = model.variogram(bin_center_arr)
                rmse = float(np.sqrt(np.mean((gamma_arr - fitted_gamma) ** 2)))

                # Cast every fitted parameter to a plain float so the result
                # is JSON-serializable (previously only len_scale was cast,
                # leaving var/nugget/nu as potential numpy scalars).
                results[model_type] = {
                    "var": float(model.var),
                    "len_scale": float(model.len_scale),
                    "nugget": float(model.nugget),
                    "rmse": rmse,
                    "fitted_values": fitted_gamma.tolist(),
                }

                if hasattr(model, "nu"):
                    results[model_type]["nu"] = float(model.nu)

            except Exception as fit_err:
                # Record the per-model failure and keep fitting the rest.
                results[model_type] = {"error": str(fit_err)}

        # Pick the model with the smallest RMSE among the successful fits.
        fitted_names = [name for name, res in results.items() if "rmse" in res]
        best_model = (
            min(fitted_names, key=lambda name: results[name]["rmse"])
            if fitted_names else None
        )

        result = {
            "models": results,
            "best_model": best_model,
            "best_rmse": results[best_model]["rmse"] if best_model else None,
            "n_models_tested": len(model_types),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
1124
+
1125
+
1126
@mcp.tool(name="evaluate_anisotropic_covariance", description="Evaluate anisotropic covariance model")
def evaluate_anisotropic_covariance(
    model_type: str,
    distances_x: List[float],
    distances_y: List[float],
    var: float = 1.0,
    len_scale_x: float = 10.0,
    len_scale_y: float = 5.0,
    angle_deg: float = 0.0
) -> dict:
    """
    Evaluate anisotropic covariance with different length scales in each direction.

    Parameters:
    - model_type (str): Type of covariance model
    - distances_x, distances_y (List[float]): Lag components in x and y
    - var (float): Variance (default: 1.0)
    - len_scale_x, len_scale_y (float): Length scales in x and y (default: 10.0, 5.0)
    - angle_deg (float): Rotation angle in degrees (default: 0.0)

    Returns:
    - dict: {"success", "result", "error"}; on success the result holds one
      covariance value per (dx, dy) lag vector plus the model setup.
    """
    try:
        model_type_lower = model_type.lower()
        if model_type_lower not in COVARIANCE_MODELS:
            return {
                "success": False,
                "result": None,
                "error": f"Unknown model type: {model_type}"
            }

        ModelClass = COVARIANCE_MODELS[model_type_lower]

        # GSTools encodes anisotropy as a ratio relative to the main length
        # scale, plus rotation angles in radians.
        anis = len_scale_y / len_scale_x  # anisotropy ratio
        angle_rad = angle_deg * np.pi / 180

        model = ModelClass(
            dim=2,
            var=var,
            len_scale=len_scale_x,
            anis=anis,
            angles=angle_rad
        )

        dx = np.array(distances_x)
        dy = np.array(distances_y)

        # Euclidean lag distance of every (dx, dy) pair, for reference.
        r = np.sqrt(dx**2 + dy**2)

        # Evaluate the covariance of the spatial lag vectors. BUGFIX: the
        # previous code called model.covariance(np.array([dx, dy])), but
        # CovModel.covariance takes isotropic distances, so it evaluated dx
        # and dy elementwise as radii and the anisotropy/rotation was never
        # applied. cov_spatial applies the model's anisotropic transform to
        # the (dim, n) lag vectors and returns one value per pair.
        cov_values = model.cov_spatial(np.array([dx, dy]))

        result = {
            "model_type": model_type,
            "var": var,
            "len_scale_x": len_scale_x,
            "len_scale_y": len_scale_y,
            "anisotropy_ratio": anis,
            "rotation_angle_deg": angle_deg,
            "distances_x": distances_x,
            "distances_y": distances_y,
            "euclidean_distances": r.tolist(),
            "covariance": cov_values.tolist(),
        }
        return {"success": True, "result": result, "error": None}
    except Exception as e:
        return {"success": False, "result": None, "error": str(e)}
1197
+
1198
+
1199
def create_app() -> FastMCP:
    """
    Return the module-level FastMCP application instance.

    Returns:
    - The FastMCP instance for the service.
    """
    return mcp