Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
60f2178
Adds a SimpleComponent.
nrfulton Dec 11, 2025
ef6daf6
Adds a simple lazy example.
nrfulton Dec 11, 2025
2fa6967
ollama generate walk.
nrfulton Dec 12, 2025
a9be6f6
Does gather() instead of awaiting on each thunk separately.
nrfulton Dec 12, 2025
5ea5312
Refactor and bug fixes.
nrfulton Dec 12, 2025
22ac0db
backend walks.
nrfulton Dec 12, 2025
7187941
Adds heapcomponents.
nrfulton Dec 17, 2025
6ea6d46
Make uncomputed mots logging less noisy.
nrfulton Dec 17, 2025
4f37d96
adds a simple example.
nrfulton Dec 17, 2025
152ede9
Cleans up fib example.
nrfulton Dec 17, 2025
477275d
Adds parts() for instruction and genslot components.
nrfulton Dec 17, 2025
976ac06
Don't call things components which are not components.
nrfulton Dec 17, 2025
1797be4
ruff.
nrfulton Dec 17, 2025
24de761
Starts adding some examples for a deepdive on sessions.
nrfulton Dec 17, 2025
ea3e789
blah
nrfulton Dec 17, 2025
b10ba6d
blah
nrfulton Dec 17, 2025
2b07ae4
Add parts() to chat.
nrfulton Dec 24, 2025
1080f3e
Merge branch 'main' into nathan/conceptual_spans
nrfulton Dec 24, 2025
e8be711
Fixes GenerativeSlot.parts()
nrfulton Dec 24, 2025
a75fd4c
Confirm assumption that RichDocument has no parts() for now.
nrfulton Dec 24, 2025
6230138
Define parts() on TableQuery
nrfulton Dec 24, 2025
1676956
Fixes ruff errors.
nrfulton Dec 24, 2025
18707e4
Merge branch 'main' of ssh://github.com/generative-computing/mellea i…
nrfulton Dec 24, 2025
009328c
Fixes error in HeapContext.add caught by mypy.
nrfulton Dec 24, 2025
50b803a
Fixes mypy errors caused by shadowing
nrfulton Dec 24, 2025
ecceed7
Adds parts() definitions to the rest of the RichDocument components.
nrfulton Dec 24, 2025
eb8c557
fixes Instruction.parts()
nrfulton Dec 24, 2025
82852eb
Improves warning message for Intrinsic.parts()
nrfulton Dec 24, 2025
62b4a96
update comment on mify.parts()
nrfulton Dec 24, 2025
c087f12
parts() implementations for MObject components.
nrfulton Dec 24, 2025
6a143f2
parts() implementation for Requirements.
nrfulton Dec 24, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 39 additions & 0 deletions docs/examples/melp/lazy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
import asyncio
from mellea.stdlib.base import (
SimpleContext,
Context,
CBlock,
ModelOutputThunk,
SimpleComponent,
)
from mellea.backends import Backend
from mellea.backends.ollama import OllamaModelBackend

# Module-level backend for the example; requires a local Ollama server with
# the `granite4:latest` model pulled.
backend = OllamaModelBackend("granite4:latest")


async def fib(backend: Backend, ctx: Context, x: CBlock, y: CBlock) -> ModelOutputThunk:
    """Ask the model for x + y and return the (possibly uncomputed) output thunk.

    Each addition runs against a fresh ``SimpleContext`` so the individual
    generations stay independent of the caller's context.
    """
    addition = SimpleComponent(
        instruction="What is x+y? Respond with the number only.", x=x, y=y
    )
    thunk, _ = await backend.generate_from_context(action=addition, ctx=SimpleContext())
    return thunk


async def main(backend: Backend, ctx: Context):
    """Build a 100-entry Fibonacci-style chain of lazy model calls and print it."""
    fibs: list = []
    for i in range(100):
        if i < 2:
            # Seed entries are plain CBlocks rather than model outputs.
            fibs.append(CBlock(f"{i + 1}"))
        else:
            fibs.append(await fib(backend, ctx, fibs[i - 1], fibs[i - 2]))

    for entry in fibs:
        if isinstance(entry, ModelOutputThunk):
            # Forces the (transitive) computation of this entry.
            print(await entry.avalue())
        elif isinstance(entry, CBlock):
            print(entry.value)


# Entry point: run the example with a fresh, empty context.
asyncio.run(main(backend, SimpleContext()))
44 changes: 44 additions & 0 deletions docs/examples/melp/lazy_fib.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import asyncio
from mellea.stdlib.base import (
SimpleContext,
Context,
CBlock,
ModelOutputThunk,
SimpleComponent,
)
from mellea.stdlib.requirement import Requirement
from mellea.backends import Backend
from mellea.backends.ollama import OllamaModelBackend
from typing import Tuple

# Module-level backend for the example; requires a local Ollama server with
# the `granite4:latest` model pulled.
backend = OllamaModelBackend("granite4:latest")


async def fib(backend: Backend, ctx: Context, x: CBlock, y: CBlock) -> ModelOutputThunk:
    """Ask the model for x + y and return the (possibly uncomputed) output thunk.

    Each addition runs against a fresh ``SimpleContext`` so the individual
    generations stay independent of the caller's context.
    """
    addition = SimpleComponent(
        instruction="What is x+y? Respond with the number only.", x=x, y=y
    )
    thunk, _ = await backend.generate_from_context(action=addition, ctx=SimpleContext())
    return thunk


async def fib_main(backend: Backend, ctx: Context):
    """Chain 20 lazy Fibonacci steps and force only the final value.

    Only the last thunk is awaited; the intermediate model calls are forced
    transitively when that final value is computed.
    """
    fibs: list = []
    for i in range(20):
        if i < 2:
            # Seed values "0" and "1" as plain CBlocks.
            fibs.append(CBlock(f"{i}"))
        else:
            fibs.append(await fib(backend, ctx, fibs[i - 1], fibs[i - 2]))

    # range(20) > 2 guarantees fibs[-1] is a ModelOutputThunk, so avalue() exists.
    print(await fibs[-1].avalue())


# Entry point: run the example with a fresh, empty context.
asyncio.run(fib_main(backend, SimpleContext()))
66 changes: 66 additions & 0 deletions docs/examples/melp/lazy_fib_sample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import asyncio
from mellea.stdlib.base import (
SimpleContext,
Context,
CBlock,
ModelOutputThunk,
SimpleComponent,
)
from mellea.stdlib.requirement import Requirement
from mellea.backends import Backend
from mellea.backends.ollama import OllamaModelBackend
from typing import Tuple

# Module-level backend for the example; requires a local Ollama server with
# the `granite4:latest` model pulled.
backend = OllamaModelBackend("granite4:latest")


async def _fib_sample(
    backend: Backend, ctx: Context, x: CBlock, y: CBlock
) -> ModelOutputThunk | None:
    """Draw one sample for x + y; return the thunk if its value parses as an int.

    Returns None when the model's answer is not an integer, so the caller can
    retry (rejection sampling).
    """
    sc = SimpleComponent(
        instruction="What is x+y? Respond with the number only.", x=x, y=y
    )
    answer_mot, _ = await backend.generate_from_context(action=sc, ctx=SimpleContext())

    # This is a fundamental thing: it means computation must occur.
    # We need to be able to read this off at c.g. construction time.
    value = await answer_mot.avalue()

    # Only catch parse failures; a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit and hide real bugs.
    try:
        int(value)
    except (TypeError, ValueError):
        return None
    return answer_mot


async def fib_sampling_version(
    backend: Backend, ctx: Context, x: CBlock, y: CBlock
) -> ModelOutputThunk | None:
    """Retry _fib_sample up to 5 times; return the first valid sample, else None."""
    for _ in range(5):
        sample = await _fib_sample(backend, ctx, x, y)
        if sample is not None:
            return sample
    # All attempts produced non-integer answers.
    return None


async def fib_sampling_version_main(backend: Backend, ctx: Context):
fibs = []
for i in range(20):
if i == 0 or i == 1:
fibs.append(CBlock(f"{i}"))
else:
mot = await fib_sampling_version(backend, ctx, fibs[i - 1], fibs[i - 2])
fibs.append(mot)

for x_i, x in enumerate(fibs):
match x:
case ModelOutputThunk():
n = await x.avalue()
print(n)
case CBlock():
print(x.value)


# Entry point: run the example with a fresh, empty context.
asyncio.run(fib_sampling_version_main(backend, SimpleContext()))
38 changes: 38 additions & 0 deletions docs/examples/melp/simple_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import asyncio
from mellea.stdlib.base import Context, CBlock, SimpleContext, ModelOutputThunk
from mellea.backends import Backend
from mellea.backends.ollama import OllamaModelBackend


async def main(backend: Backend, ctx: Context):
    """
    In this example, we show how executing multiple MOTs in parallel should work.
    """
    m_states = "Missouri", "Minnesota", "Montana", "Massachusetts"

    poem_thunks = []
    for state_name in m_states:
        mot, ctx = await backend.generate_from_context(
            CBlock(f"Write a poem about {state_name}"), ctx
        )
        poem_thunks.append(mot)

    # Notice that what we have now is a list of ModelOutputThunks, none of which are computed.
    for poem_thunk in poem_thunks:
        # isinstance is the idiomatic type check; `type(x) == T` breaks on subclasses.
        assert isinstance(poem_thunk, ModelOutputThunk)
        print(f"Computed: {poem_thunk.is_computed()}")

    # Let's run all of these in parallel.
    await asyncio.gather(*[c.avalue() for c in poem_thunks])

    # Print out the final results, which are now computed.
    for poem_thunk in poem_thunks:
        print(f"Computed: {poem_thunk.is_computed()}")

    # And let's print out the final results.
    for poem_thunk in poem_thunks:
        print(poem_thunk.value)


# Construct the backend at module scope and drive the async example.
backend = OllamaModelBackend(model_id="granite4:latest")
asyncio.run(main(backend, SimpleContext()))
44 changes: 44 additions & 0 deletions docs/examples/melp/states.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from mellea.stdlib.base import SimpleContext, Context, CBlock, SimpleComponent
from mellea.backends import Backend
from mellea.backends.ollama import OllamaModelBackend
import asyncio


async def _total_population(backend: Backend, states):
    """Ask for each state's population, then ask for the total over those answers.

    Returns the (lazy) ModelOutputThunk for the total. Each generation runs
    against a fresh SimpleContext, so the per-state answers are independent.
    """
    state_pops = dict()
    for state in states:
        state_pops[state], _ = await backend.generate_from_context(
            CBlock(f"What is the population of {state}? Respond with an integer only."),
            SimpleContext(),
        )
    total_pop = SimpleComponent(
        instruction=CBlock(
            "What is the total population of these states? Respond with an integer only."
        ),
        **state_pops,
    )
    state_total, _ = await backend.generate_from_context(total_pop, SimpleContext())
    return state_total


async def main(backend: Backend, ctx: Context):
    """Compute total populations for the A-states and the M-states and print both."""
    a_states = "Alaska,Arizona,Arkansas".split(",")
    m_states = "Missouri", "Minnesota", "Montana", "Massachusetts"

    # The two state groups previously duplicated this logic inline; the shared
    # helper keeps the example behavior identical for both.
    a_state_total = await _total_population(backend, a_states)
    m_state_total = await _total_population(backend, m_states)

    print(await a_state_total.avalue())
    print(await m_state_total.avalue())


# Construct the backend at module scope and drive the async example.
backend = OllamaModelBackend(model_id="granite4:latest")
asyncio.run(main(backend, SimpleContext()))
10 changes: 10 additions & 0 deletions docs/rewrite/session_deepdive/0.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from mellea import MelleaSession
from mellea.stdlib.base import SimpleContext
from mellea.backends.ollama import OllamaModelBackend


# A minimal session: an Ollama backend plus a SimpleContext (no chat history).
m = MelleaSession(
    backend=OllamaModelBackend("granite4:latest"), context=SimpleContext()
)
# chat() returns a message object; .content holds the model's reply text.
response = m.chat("What is 1+1?")
print(response.content)
11 changes: 11 additions & 0 deletions docs/rewrite/session_deepdive/1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import mellea.stdlib.functional as mfuncs
from mellea.stdlib.base import SimpleContext
from mellea.backends.ollama import OllamaModelBackend

# The functional API: mfuncs.chat returns both the response and the context
# that follows from it, instead of mutating a session object.
response, next_context = mfuncs.chat(
    "What is 1+1?",
    context=SimpleContext(),
    backend=OllamaModelBackend("granite4:latest"),
)

print(response.content)
11 changes: 11 additions & 0 deletions docs/rewrite/session_deepdive/2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import mellea.stdlib.functional as mfuncs
from mellea.stdlib.base import SimpleContext, CBlock
from mellea.backends.ollama import OllamaModelBackend

# act() takes an arbitrary action (here a CBlock); the response exposes the
# model's output via .value rather than chat's .content.
response, next_context = mfuncs.act(
    CBlock("What is 1+1?"),
    context=SimpleContext(),
    backend=OllamaModelBackend("granite4:latest"),
)

print(response.value)
16 changes: 16 additions & 0 deletions docs/rewrite/session_deepdive/3.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import mellea.stdlib.functional as mfuncs
from mellea.stdlib.base import SimpleContext, CBlock, Context
from mellea.backends.ollama import OllamaModelBackend
from mellea.backends import Backend
import asyncio


async def main(backend: Backend, ctx: Context):
    """Async variant of act(): aact() is awaited inside an event loop."""
    response, next_context = await mfuncs.aact(
        CBlock("What is 1+1?"), context=ctx, backend=backend
    )

    print(response.value)


asyncio.run(main(OllamaModelBackend("granite4:latest"), SimpleContext()))
19 changes: 19 additions & 0 deletions docs/rewrite/session_deepdive/4.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import mellea.stdlib.functional as mfuncs
from mellea.stdlib.base import SimpleContext, CBlock, Context
from mellea.backends.ollama import OllamaModelBackend
from mellea.backends import Backend
import asyncio


async def main(backend: Backend, ctx: Context):
    """Call the backend directly and observe the lazy evaluation of the thunk."""
    response, next_context = await backend.generate_from_context(
        CBlock("What is 1+1?"),
        ctx=ctx,  # TODO we should rationalize ctx and context across mfuncs and base/backend.
    )

    # The thunk is not computed until avalue() forces it.
    print(f"Currently computed: {response.is_computed()}")
    print(await response.avalue())
    print(f"Currently computed: {response.is_computed()}")


asyncio.run(main(OllamaModelBackend("granite4:latest"), SimpleContext()))
31 changes: 31 additions & 0 deletions docs/rewrite/session_deepdive/5.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import mellea.stdlib.functional as mfuncs
from mellea.stdlib.base import (
SimpleContext,
CBlock,
Context,
SimpleComponent,
Component,
)
from mellea.backends.ollama import OllamaModelBackend
from mellea.backends import Backend
import asyncio


async def main(backend: Backend, ctx: Context):
    """Compose two uncomputed thunks into a third generation.

    Nothing is computed until the final avalue() call forces the chain.
    """
    x, _ = await backend.generate_from_context(CBlock("What is 1+1?"), ctx=ctx)

    y, _ = await backend.generate_from_context(CBlock("What is 2+2?"), ctx=ctx)

    response, _ = await backend.generate_from_context(
        SimpleComponent(instruction="What is x+y?", x=x, y=y),
        ctx=ctx,  # TODO we should rationalize ctx and context across mfuncs and base/backend.
    )

    print(f"x currently computed: {x.is_computed()}")
    print(f"y currently computed: {y.is_computed()}")
    print(f"response currently computed: {response.is_computed()}")
    print(await response.avalue())
    print(f"response currently computed: {response.is_computed()}")


asyncio.run(main(OllamaModelBackend("granite4:latest"), SimpleContext()))
25 changes: 25 additions & 0 deletions docs/rewrite/streaming/1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import mellea.stdlib.functional as mfuncs
from mellea.stdlib.base import SimpleContext, CBlock, Context, SimpleComponent
from mellea.backends.ollama import OllamaModelBackend
from mellea.backends import Backend
import asyncio


async def main(backend: Backend, ctx: Context):
    """Compose two uncomputed thunks into a third generation.

    Nothing is computed until the final avalue() call forces the chain.
    """
    x, _ = await backend.generate_from_context(CBlock("What is 1+1?"), ctx=ctx)

    y, _ = await backend.generate_from_context(CBlock("What is 2+2?"), ctx=ctx)

    response, _ = await backend.generate_from_context(
        SimpleComponent(instruction="What is x+y?", x=x, y=y),
        ctx=ctx,  # TODO we should rationalize ctx and context across mfuncs and base/backend.
    )

    print(f"x currently computed: {x.is_computed()}")
    print(f"y currently computed: {y.is_computed()}")
    print(f"response currently computed: {response.is_computed()}")
    print(await response.avalue())
    print(f"response currently computed: {response.is_computed()}")


asyncio.run(main(OllamaModelBackend("granite4:latest"), SimpleContext()))
Loading
Loading