Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
112f000
Add VibeSafe project management framework
lawrennd Aug 15, 2025
d06e3d1
Add GPy community-driven development tenet
lawrennd Aug 15, 2025
90805ba
Update CIP-0001: Modernize LFM kernel implementation
lawrennd Aug 15, 2025
06a103d
Add LFM kernel implementation backlog items
lawrennd Aug 15, 2025
26ca1a6
Integrate backlog items into CIP-0001
lawrennd Aug 15, 2025
9f44a3d
Fix backlog item YAML frontmatter format
lawrennd Aug 15, 2025
883309a
Update LFM kernel code review: Initial analysis of GPy and MATLAB imp…
lawrennd Aug 15, 2025
e659649
Add backlog item for parameter tying investigation and CIP creation
lawrennd Aug 15, 2025
f058078
Update parameter tying investigation with GitHub issue findings
lawrennd Aug 15, 2025
df1164d
Update parameter tying investigation with paramz dependency analysis
lawrennd Aug 15, 2025
de37894
Add CIP-0002: Parameter Tying Framework for GPy - Community Discussion
lawrennd Aug 15, 2025
c8e98f9
Update LFM kernel backlog items - complete code review, start design …
lawrennd Aug 15, 2025
7227ad8
Complete test-driven design phase for LFM kernel - Add comprehensive …
lawrennd Aug 15, 2025
ca1d4bf
Add MATLAB comparison framework for LFM kernel validation
lawrennd Aug 15, 2025
5ae4101
Remove MATLAB comparison script - will be built as external validatio…
lawrennd Aug 15, 2025
74c39c8
Add initial LFM1 kernel implementation with basic structure and param…
lawrennd Aug 15, 2025
bcaa567
Discover LFM kernels already exist as EQ_ODE1 and EQ_ODE2 - update do…
lawrennd Aug 15, 2025
419be7b
Fix and improve LFM kernel implementation
lawrennd Aug 15, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -61,3 +61,47 @@ settings.json
.eggs
.venv
.env

# VibeSafe System Files (Auto-added during installation)
# These are VibeSafe infrastructure - not your project content

# Backlog system files
backlog/README.md
backlog/task_template.md
backlog/update_index.py
backlog/index.md

# CIP system files
cip/README.md
cip/cip_template.md

# Cursor AI rules (VibeSafe-managed)
.cursor/rules/
# Generated project tenet cursor rules
.cursor/rules/project_tenet_*.mdc

# VibeSafe scripts and tools
scripts/whats_next.py
install-whats-next.sh
whats-next

# VibeSafe templates directory
templates/

# AI-Requirements framework (VibeSafe-managed)
ai-requirements/README.md
ai-requirements/requirement_template.md
ai-requirements/prompts/
ai-requirements/patterns/
ai-requirements/integrations/
ai-requirements/examples/
ai-requirements/guidance/

# Tenets system files
tenets/README.md
tenets/tenet_template.md
tenets/combine_tenets.py

# VibeSafe documentation (system files)
docs/whats_next_script.md
docs/yaml_frontmatter_examples.md
141 changes: 111 additions & 30 deletions GPy/kern/src/eq_ode1.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,28 +11,43 @@

class EQ_ODE1(Kern):
"""
Covariance function for first order differential equation driven by an exponentiated quadratic covariance.

The outputs of this kernel have the form
Latent Force Model (LFM) kernel for first-order differential equations (Single Input Motif - SIM).

This kernel implements the covariance function for first-order differential equations driven by
an exponentiated quadratic (RBF) covariance, which is the foundation of Latent Force Models.

The outputs of this kernel have the form:
.. math::
\\frac{\\text{d}y_j}{\\text{d}t} = \\sum_{i=1}^R w_{j,i} u_i(t-\\delta_j) - d_jy_j(t)

where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output to the :math:`i`th latent function, :math:`d_j` is the decay rate of the :math:`j`th output and :math:`u_i(t)` are independent latent Gaussian processes governed by an exponentiated quadratic covariance.
where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output
to the :math:`i`th latent function, :math:`d_j` is the decay rate of the :math:`j`th output and
:math:`u_i(t)` are independent latent Gaussian processes governed by an exponentiated quadratic covariance.

This kernel is equivalent to the SIM (Single Input Motif) kernel from the GPmat toolbox and
implements the mathematical framework described in:

- Lawrence et al. (2006): "Modelling transcriptional regulation using Gaussian Processes"

:param output_dim: number of outputs driven by latent function.
:param input_dim: Input dimension (must be 2: time + output index)
:type input_dim: int
:param output_dim: Number of outputs driven by latent function
:type output_dim: int
:param W: sensitivities of each output to the latent driving function.
:type W: ndarray (output_dim x rank).
:param rank: If rank is greater than 1 then there are assumed to be a total of rank latent forces independently driving the system, each with identical covariance.
:param rank: Number of latent forces. If rank > 1, there are multiple latent forces independently driving the system
:type rank: int
:param decay: decay rates for the first order system.
:type decay: array of length output_dim.
:param delay: delay between latent force and output response.
:type delay: array of length output_dim.
:param kappa: diagonal term that allows each latent output to have an independent component to the response.
:type kappa: array of length output_dim.

.. Note: see first order differential equation examples in GPy.examples.regression for some usage.
:param W: Sensitivity matrix of each output to the latent driving functions (output_dim x rank)
:type W: ndarray
:param lengthscale: Lengthscale(s) of the RBF kernel for latent forces
:type lengthscale: float or array
:param decay: Decay rates for the first order system (array of length output_dim)
:type decay: array
:param active_dims: Active dimensions for the kernel
:type active_dims: array
:param name: Name of the kernel
:type name: str

.. Note: See first order differential equation examples in GPy.examples.regression for usage examples.
.. Note: This kernel requires input_dim=2 where the first dimension is time and the second is the output index.
"""

def __init__(
Expand Down Expand Up @@ -713,19 +728,85 @@ def _gkfu_z(self, X, index, Z, index2): # Kfu(t,z)


def lnDifErf(z1, z2):
    """
    Compute log(|erf(z1) - erf(z2)|) in a numerically stable manner.

    For large same-sign arguments the direct difference erf(z1) - erf(z2)
    underflows (erf saturates at +/-1); those cases are rewritten in terms of
    the scaled complementary error function erfcx(z) = exp(z**2) * erfc(z),
    which stays representable.  Based on the MATLAB implementation by
    Antti Honkela and David Luengo.

    :param z1: first argument (scalar or array-like).
    :param z2: second argument (scalar or array-like); historically assumed
        positive by callers, but every sign combination is handled.
    :returns: log(|erf(z1) - erf(z2)|) with the broadcast shape of the
        inputs; a scalar is returned when both inputs are scalar.
        Entries where z1 == z2 are -inf.
    """
    # Floor applied before taking logs so a fully cancelled difference yields
    # a very negative log instead of a log(0)/log(negative) warning.
    TINY = 1e-300

    z1 = np.asarray(z1, dtype=float)
    z2 = np.asarray(z2, dtype=float)
    scalar_input = z1.ndim == 0 and z2.ndim == 0

    # Broadcast so mixed scalar/array (or differently shaped) inputs work,
    # then flatten so the boolean-mask assignments below are shape-agnostic.
    z1b, z2b = np.broadcast_arrays(z1, z2)
    shape = z1b.shape
    a = z1b.ravel()
    b = z2b.ravel()
    out = np.zeros(a.shape)

    # Case 1: opposite signs -- no catastrophic cancellation, direct is safe.
    I1 = (a * b) < 0
    # Case 2: identical arguments -- the difference is exactly zero.
    I2 = a == b
    # Cases 3/4: both strictly positive / both strictly negative -- rewrite
    # with erfcx to avoid the underflow of 1 - erf(z).
    I3 = (a > 0) & (b > 0) & ~I1 & ~I2
    I4 = (a < 0) & (b < 0) & ~I1 & ~I2
    # Case 5: whatever remains (one or both arguments exactly zero).
    I5 = ~I1 & ~I2 & ~I3 & ~I4

    if np.any(I1):
        out[I1] = np.log(np.maximum(np.abs(erf(a[I1]) - erf(b[I1])), TINY))

    if np.any(I2):
        out[I2] = -np.inf

    if np.any(I3):
        # erf(z1) - erf(z2) = exp(-z2**2) * (erfcx(z2) - erfcx(z1)*exp(z2**2 - z1**2));
        # take |.| so the documented log(abs(...)) holds for either ordering.
        d = erfcx(b[I3]) - erfcx(a[I3]) * np.exp(b[I3] ** 2 - a[I3] ** 2)
        out[I3] = np.log(np.maximum(np.abs(d), TINY)) - b[I3] ** 2

    if np.any(I4):
        # Mirror of case 3 via erf(-z) = -erf(z):
        # erf(z1) - erf(z2) = exp(-z1**2) * (erfcx(-z1) - erfcx(-z2)*exp(z1**2 - z2**2))
        d = erfcx(-a[I4]) - erfcx(-b[I4]) * np.exp(a[I4] ** 2 - b[I4] ** 2)
        out[I4] = np.log(np.maximum(np.abs(d), TINY)) - a[I4] ** 2

    if np.any(I5):
        out[I5] = np.log(np.maximum(np.abs(erf(a[I5]) - erf(b[I5])), TINY))

    out = out.reshape(shape)
    # Preserve the scalar-in/scalar-out contract of the original.
    return out[()] if scalar_input else out
Loading
Loading