Skip to content
Draft
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
141 changes: 141 additions & 0 deletions common/src/main/java/org/apache/comet/udf/CometUdfBridge.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.comet.udf;

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.arrow.c.ArrowArray;
import org.apache.arrow.c.ArrowSchema;
import org.apache.arrow.c.Data;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.vector.FieldVector;
import org.apache.arrow.vector.ValueVector;

/**
* JNI entry point for native execution to invoke a {@link CometUDF}. Matches the static-method
* pattern used by CometScalarSubquery so the native side can dispatch via
* call_static_method_unchecked.
*/
public class CometUdfBridge {

  // Per-thread, bounded LRU of UDF instances keyed by class name. Comet
  // native execution threads (Tokio/DataFusion worker pool) are reused
  // across tasks within an executor, so the effective lifetime of cached
  // entries is the worker thread (i.e. the executor JVM). This is fine for
  // stateless UDFs like ArrayExistsUDF; future stateful UDFs would need
  // explicit per-task isolation.
  private static final int CACHE_CAPACITY = 64;

  private static final ThreadLocal<LinkedHashMap<String, CometUDF>> INSTANCES =
      ThreadLocal.withInitial(
          () ->
              new LinkedHashMap<String, CometUDF>(CACHE_CAPACITY, 0.75f, true) {
                @Override
                protected boolean removeEldestEntry(Map.Entry<String, CometUDF> eldest) {
                  return size() > CACHE_CAPACITY;
                }
              });

  /**
   * Called from native via JNI. Imports the input Arrow vectors through the C Data Interface,
   * invokes the UDF, validates the result, and exports it through the pre-allocated output FFI
   * structs. Imported inputs and the result vector are always closed before returning.
   *
   * @param udfClassName fully-qualified class name implementing CometUDF
   * @param inputArrayPtrs addresses of pre-allocated FFI_ArrowArray structs (one per input)
   * @param inputSchemaPtrs addresses of pre-allocated FFI_ArrowSchema structs (one per input)
   * @param outArrayPtr address of pre-allocated FFI_ArrowArray for the result
   * @param outSchemaPtr address of pre-allocated FFI_ArrowSchema for the result
   */
  public static void evaluate(
      String udfClassName,
      long[] inputArrayPtrs,
      long[] inputSchemaPtrs,
      long outArrayPtr,
      long outSchemaPtr) {
    LinkedHashMap<String, CometUDF> cache = INSTANCES.get();
    CometUDF udf = cache.get(udfClassName);
    if (udf == null) {
      try {
        // Resolve via the executor's context classloader so user-supplied UDF jars
        // (added via spark.jars / --jars) are visible.
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        if (cl == null) {
          cl = CometUdfBridge.class.getClassLoader();
        }
        udf =
            (CometUDF) Class.forName(udfClassName, true, cl).getDeclaredConstructor().newInstance();
      } catch (ReflectiveOperationException | ClassCastException e) {
        // ClassCastException covers a class that exists and instantiates but does not
        // implement CometUDF; without it the raw CCE would escape with no context.
        throw new RuntimeException("Failed to instantiate CometUDF: " + udfClassName, e);
      }
      cache.put(udfClassName, udf);
    }

    BufferAllocator allocator = org.apache.comet.package$.MODULE$.CometArrowAllocator();

    ValueVector[] inputs = new ValueVector[inputArrayPtrs.length];
    ValueVector result = null;
    try {
      for (int i = 0; i < inputArrayPtrs.length; i++) {
        ArrowArray inArr = ArrowArray.wrap(inputArrayPtrs[i]);
        ArrowSchema inSch = ArrowSchema.wrap(inputSchemaPtrs[i]);
        inputs[i] = Data.importVector(allocator, inArr, inSch, null);
      }

      result = udf.evaluate(inputs);
      // Guard against a null result before dereferencing it: the instanceof check below
      // is false for null, and result.getClass() in its error message would then throw
      // an uninformative NullPointerException instead of this diagnostic.
      if (result == null) {
        throw new RuntimeException("CometUDF.evaluate() returned null for UDF: " + udfClassName);
      }
      if (!(result instanceof FieldVector)) {
        throw new RuntimeException(
            "CometUDF.evaluate() must return a FieldVector, got: " + result.getClass().getName());
      }
      // Result length must match the longest input. Scalar (length-1) inputs
      // are allowed to be shorter, but a vector input bounds the output.
      int expectedLen = 0;
      for (ValueVector v : inputs) {
        expectedLen = Math.max(expectedLen, v.getValueCount());
      }
      if (result.getValueCount() != expectedLen) {
        throw new RuntimeException(
            "CometUDF.evaluate() returned "
                + result.getValueCount()
                + " rows, expected "
                + expectedLen);
      }
      ArrowArray outArr = ArrowArray.wrap(outArrayPtr);
      ArrowSchema outSch = ArrowSchema.wrap(outSchemaPtr);
      Data.exportVector(allocator, (FieldVector) result, null, outArr, outSch);
    } finally {
      // Release imported inputs and the result. close() failures are swallowed so they
      // never mask an exception already propagating from the try block.
      for (ValueVector v : inputs) {
        if (v != null) {
          try {
            v.close();
          } catch (RuntimeException ignored) {
            // do not mask the original throwable
          }
        }
      }
      if (result != null) {
        try {
          result.close();
        } catch (RuntimeException ignored) {
          // do not mask the original throwable
        }
      }
    }
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.comet.udf

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.sql.catalyst.expressions.Expression

/**
* Thread-safe registry bridging plan-time Spark expressions to execution-time UDF lookup. At plan
* time the serde layer registers a lambda expression under a unique key; at execution time the
* UDF retrieves it by that key (passed as a scalar argument).
*/
object CometLambdaRegistry {

  // Concurrent map from generated UUID key to the registered plan-time expression.
  private val registry = new ConcurrentHashMap[String, Expression]()

  /** Stores `expression` under a freshly generated UUID and returns that key. */
  def register(expression: Expression): String = {
    val key = UUID.randomUUID().toString
    registry.put(key, expression)
    key
  }

  /**
   * Retrieves the expression registered under `key`, failing fast if it is absent (which
   * indicates the registration did not survive from planning to execution).
   */
  def get(key: String): Expression =
    Option(registry.get(key)).getOrElse {
      throw new IllegalStateException(
        s"Lambda expression not found in registry for key: $key. " +
          "This indicates a lifecycle issue between plan creation and execution.")
    }

  /** Discards the entry registered under `key`, if any. */
  def remove(key: String): Unit = registry.remove(key)

  // Visible for testing
  def size(): Int = registry.size()
}
37 changes: 37 additions & 0 deletions common/src/main/scala/org/apache/comet/udf/CometUDF.scala
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.comet.udf

import org.apache.arrow.vector.ValueVector

/**
* Scalar UDF invoked from native execution via JNI. Receives Arrow vectors as input and returns
* an Arrow vector.
*
* - Vector arguments arrive at the row count of the current batch.
* - Scalar (literal-folded) arguments arrive as length-1 vectors and must be read at index 0.
* - The returned vector's length must match the longest input.
*
* Implementations must have a public no-arg constructor and should be stateless: instances are
* cached per executor thread for the lifetime of the JVM.
*/
trait CometUDF {

  /**
   * Applies the UDF to one batch of Arrow input vectors and returns the result vector.
   * See the trait-level documentation for the length contract between inputs and output.
   */
  def evaluate(inputs: Array[ValueVector]): ValueVector
}
127 changes: 127 additions & 0 deletions common/src/main/scala/org/apache/comet/udf/CometUdfRegistry.scala
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.comet.udf

import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.DataType

/**
* Registry for user-defined CometUDF implementations. Users register their UDF class names here
* so that the Comet serde layer can intercept matching Spark UDFs and route them to native
* execution via the JVM UDF bridge.
*
* Usage:
* {{{
* // Register a CometUDF implementation for a Spark UDF
* CometUdfRegistry.register(
* "my_func", // Spark UDF name (as used in spark.udf.register)
* "com.example.MyUdf", // CometUDF implementation class
* BooleanType, // return type
* nullable = true // whether the result may contain nulls
* )
*
* // Or use the convenience method that also registers the Spark UDF:
* CometUdfRegistry.register(
* spark,
* "my_func",
* "com.example.MyUdf",
* sparkUdf, // the Spark UserDefinedFunction
* BooleanType,
* nullable = true
* )
* }}}
*/
object CometUdfRegistry {

  /** Metadata recorded for one registered UDF: implementation class, result type, nullability. */
  case class UdfEntry(className: String, returnType: DataType, nullable: Boolean)

  // Concurrent map from Spark UDF name to its Comet implementation entry.
  private val registry = new ConcurrentHashMap[String, UdfEntry]()

  /**
   * Register a CometUDF implementation for a named Spark UDF.
   *
   * @param name
   *   The UDF name as registered with Spark (via spark.udf.register)
   * @param className
   *   Fully-qualified class name implementing CometUDF
   * @param returnType
   *   The return DataType of the UDF
   * @param nullable
   *   Whether the result column may contain nulls
   */
  def register(name: String, className: String, returnType: DataType, nullable: Boolean): Unit = {
    registry.put(name, UdfEntry(className, returnType, nullable))
  }

  /**
   * Convenience overload: registers the UDF with Spark (for row-at-a-time fallback) and with
   * Comet in a single call.
   *
   * @param spark
   *   The SparkSession
   * @param name
   *   The UDF name
   * @param className
   *   Fully-qualified CometUDF class name
   * @param sparkUdf
   *   The Spark UserDefinedFunction (for row-at-a-time fallback)
   * @param returnType
   *   The return DataType
   * @param nullable
   *   Whether the result may contain nulls
   */
  def register(
      spark: SparkSession,
      name: String,
      className: String,
      sparkUdf: org.apache.spark.sql.expressions.UserDefinedFunction,
      returnType: DataType,
      nullable: Boolean): Unit = {
    spark.udf.register(name, sparkUdf)
    register(name, className, returnType, nullable)
  }

  /**
   * Look up a registered CometUDF by its Spark UDF name.
   *
   * @return
   *   Some(UdfEntry) if registered, None otherwise
   */
  def get(name: String): Option[UdfEntry] = Option(registry.get(name))

  /** Remove a previously registered UDF. */
  def remove(name: String): Unit = registry.remove(name)

  /** Check whether a UDF name is registered. */
  def isRegistered(name: String): Boolean = registry.containsKey(name)

  // Visible for testing
  def size(): Int = registry.size()

  // Visible for testing
  def clear(): Unit = registry.clear()
}
Loading
Loading