CAMEL-14263: camel-spark-rest should use source code generated configurer to avoid reflection configuration.
diff --git a/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java b/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java
index 6446d1c..be537a4 100644
--- a/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java
+++ b/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java
@@ -204,8 +204,6 @@
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
SparkConfiguration config = getSparkConfiguration().copy();
- //TODO: we need to remove the usage of setProperties for populating the copy of the configuration
- setProperties(config, parameters);
SparkEndpoint answer = new SparkEndpoint(uri, this);
answer.setSparkConfiguration(config);
diff --git a/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java b/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java
index 1c0237d..c3eea0f 100644
--- a/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java
+++ b/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java
@@ -17,13 +17,15 @@
package org.apache.camel.builder.endpoint.dsl;
import javax.annotation.Generated;
+import org.apache.camel.ExchangePattern;
import org.apache.camel.builder.EndpointConsumerBuilder;
import org.apache.camel.builder.EndpointProducerBuilder;
import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
+import org.apache.camel.spi.ExceptionHandler;
/**
- * The spark component can be used to send RDD or DataFrame jobs to Apache Spark
- * cluster.
+ * The spark-rest component is used for hosting REST services which have
+ * been defined using Camel rest-dsl.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@@ -32,180 +34,263 @@
/**
- * Builder for endpoint for the Spark component.
+ * Builder for endpoint for the Spark Rest component.
*/
- public interface SparkEndpointBuilder extends EndpointProducerBuilder {
+ public interface SparkEndpointBuilder extends EndpointConsumerBuilder {
default AdvancedSparkEndpointBuilder advanced() {
return (AdvancedSparkEndpointBuilder) this;
}
/**
- * Indicates if results should be collected or counted.
+ * Accept type such as: 'text/xml', or 'application/json'. By default we
+ * accept all kinds of types.
+ *
+ * The option is a: <code>java.lang.String</code> type.
+ *
+ * Group: consumer
+ */
+ default SparkEndpointBuilder accept(String accept) {
+ doSetProperty("accept", accept);
+ return this;
+ }
+ /**
+ * Allows for bridging the consumer to the Camel routing Error Handler,
+ * which means any exceptions that occur while the consumer is trying to
+ * pick up incoming messages, or the like, will now be processed as a
+ * message and handled by the routing Error Handler. By default the
+ * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
+ * with exceptions; they will be logged at WARN or ERROR level and
+ * ignored.
*
* The option is a: <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder collect(boolean collect) {
- doSetProperty("collect", collect);
+ default SparkEndpointBuilder bridgeErrorHandler(
+ boolean bridgeErrorHandler) {
+ doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
- * Indicates if results should be collected or counted.
+ * Allows for bridging the consumer to the Camel routing Error Handler,
+ * which means any exceptions that occur while the consumer is trying to
+ * pick up incoming messages, or the like, will now be processed as a
+ * message and handled by the routing Error Handler. By default the
+ * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
+ * with exceptions; they will be logged at WARN or ERROR level and
+ * ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder collect(String collect) {
- doSetProperty("collect", collect);
+ default SparkEndpointBuilder bridgeErrorHandler(
+ String bridgeErrorHandler) {
+ doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
- * DataFrame to compute against.
- *
- * The option is a:
- * <code>org.apache.spark.sql.Dataset<org.apache.spark.sql.Row></code> type.
- *
- * Group: producer
- */
- default SparkEndpointBuilder dataFrame(Object dataFrame) {
- doSetProperty("dataFrame", dataFrame);
- return this;
- }
- /**
- * DataFrame to compute against.
- *
- * The option will be converted to a
- * <code>org.apache.spark.sql.Dataset<org.apache.spark.sql.Row></code> type.
- *
- * Group: producer
- */
- default SparkEndpointBuilder dataFrame(String dataFrame) {
- doSetProperty("dataFrame", dataFrame);
- return this;
- }
- /**
- * Function performing action against an DataFrame.
- *
- * The option is a:
- * <code>org.apache.camel.component.spark.DataFrameCallback</code> type.
- *
- * Group: producer
- */
- default SparkEndpointBuilder dataFrameCallback(Object dataFrameCallback) {
- doSetProperty("dataFrameCallback", dataFrameCallback);
- return this;
- }
- /**
- * Function performing action against an DataFrame.
- *
- * The option will be converted to a
- * <code>org.apache.camel.component.spark.DataFrameCallback</code> type.
- *
- * Group: producer
- */
- default SparkEndpointBuilder dataFrameCallback(String dataFrameCallback) {
- doSetProperty("dataFrameCallback", dataFrameCallback);
- return this;
- }
- /**
- * Whether the producer should be started lazy (on the first message).
- * By starting lazy you can use this to allow CamelContext and routes to
- * startup in situations where a producer may otherwise fail during
- * starting and cause the route to fail being started. By deferring this
- * startup to be lazy then the startup failure can be handled during
- * routing messages via Camel's routing error handlers. Beware that when
- * the first message is processed then creating and starting the
- * producer may take a little time and prolong the total processing time
- * of the processing.
+ * Determines whether or not the raw input stream from Spark
+ * HttpRequest#getContent() is cached or not (Camel will read the stream
+ * into a light-weight memory-based stream cache). By default
+ * Camel will cache the Netty input stream to support reading it
+ * multiple times to ensure Camel can retrieve all data from the stream.
+ * However you can set this option to true when you for example need to
+ * access the raw stream, such as streaming it directly to a file or
+ * other persistent store. Mind that if you enable this option, then you
+ * cannot read the Netty stream multiple times out of the box, and you
+ * would need to manually reset the reader index on the Spark raw
+ * stream.
*
* The option is a: <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
- doSetProperty("lazyStartProducer", lazyStartProducer);
+ default SparkEndpointBuilder disableStreamCache(
+ boolean disableStreamCache) {
+ doSetProperty("disableStreamCache", disableStreamCache);
return this;
}
/**
- * Whether the producer should be started lazy (on the first message).
- * By starting lazy you can use this to allow CamelContext and routes to
- * startup in situations where a producer may otherwise fail during
- * starting and cause the route to fail being started. By deferring this
- * startup to be lazy then the startup failure can be handled during
- * routing messages via Camel's routing error handlers. Beware that when
- * the first message is processed then creating and starting the
- * producer may take a little time and prolong the total processing time
- * of the processing.
+ * Determines whether or not the raw input stream from Spark
+ * HttpRequest#getContent() is cached or not (Camel will read the stream
+ * into a light-weight memory-based stream cache). By default
+ * Camel will cache the Netty input stream to support reading it
+ * multiple times to ensure Camel can retrieve all data from the stream.
+ * However you can set this option to true when you for example need to
+ * access the raw stream, such as streaming it directly to a file or
+ * other persistent store. Mind that if you enable this option, then you
+ * cannot read the Netty stream multiple times out of the box, and you
+ * would need to manually reset the reader index on the Spark raw
+ * stream.
*
* The option will be converted to a <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder lazyStartProducer(String lazyStartProducer) {
- doSetProperty("lazyStartProducer", lazyStartProducer);
+ default SparkEndpointBuilder disableStreamCache(
+ String disableStreamCache) {
+ doSetProperty("disableStreamCache", disableStreamCache);
return this;
}
/**
- * RDD to compute against.
+ * If this option is enabled, then during binding from Spark to Camel
+ * Message the headers will be mapped as well (eg added as headers
+ * on the Camel Message). You can turn off this option to
+ * disable this. The headers can still be accessed from the
+ * org.apache.camel.component.sparkrest.SparkMessage message with the
+ * method getRequest() that returns the Spark HTTP request instance.
*
- * The option is a: <code>org.apache.spark.api.java.JavaRDDLike</code>
- * type.
+ * The option is a: <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder rdd(Object rdd) {
- doSetProperty("rdd", rdd);
+ default SparkEndpointBuilder mapHeaders(boolean mapHeaders) {
+ doSetProperty("mapHeaders", mapHeaders);
return this;
}
/**
- * RDD to compute against.
+ * If this option is enabled, then during binding from Spark to Camel
+ * Message the headers will be mapped as well (eg added as headers
+ * on the Camel Message). You can turn off this option to
+ * disable this. The headers can still be accessed from the
+ * org.apache.camel.component.sparkrest.SparkMessage message with the
+ * method getRequest() that returns the Spark HTTP request instance.
*
- * The option will be converted to a
- * <code>org.apache.spark.api.java.JavaRDDLike</code> type.
+ * The option will be converted to a <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder rdd(String rdd) {
- doSetProperty("rdd", rdd);
+ default SparkEndpointBuilder mapHeaders(String mapHeaders) {
+ doSetProperty("mapHeaders", mapHeaders);
return this;
}
/**
- * Function performing action against an RDD.
+ * If enabled and an Exchange failed processing on the consumer side,
+ * then the caused Exception is sent back serialized in the response
+ * as an application/x-java-serialized-object content type. This is
+ * turned off by default. If you enable this then be aware that Java will
+ * deserialize the incoming data from the request to Java and that can
+ * be a potential security risk.
*
- * The option is a:
- * <code>org.apache.camel.component.spark.RddCallback</code> type.
+ * The option is a: <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder rddCallback(Object rddCallback) {
- doSetProperty("rddCallback", rddCallback);
+ default SparkEndpointBuilder transferException(boolean transferException) {
+ doSetProperty("transferException", transferException);
return this;
}
/**
- * Function performing action against an RDD.
+ * If enabled and an Exchange failed processing on the consumer side,
+ * then the caused Exception is sent back serialized in the response
+ * as an application/x-java-serialized-object content type. This is
+ * turned off by default. If you enable this then be aware that Java will
+ * deserialize the incoming data from the request to Java and that can
+ * be a potential security risk.
*
- * The option will be converted to a
- * <code>org.apache.camel.component.spark.RddCallback</code> type.
+ * The option will be converted to a <code>boolean</code> type.
*
- * Group: producer
+ * Group: consumer
*/
- default SparkEndpointBuilder rddCallback(String rddCallback) {
- doSetProperty("rddCallback", rddCallback);
+ default SparkEndpointBuilder transferException(String transferException) {
+ doSetProperty("transferException", transferException);
+ return this;
+ }
+ /**
+ * If this option is enabled, then during binding from Spark to Camel
+ * Message the header values will be URL decoded (eg %20 will be a
+ * space character).
+ *
+ * The option is a: <code>boolean</code> type.
+ *
+ * Group: consumer
+ */
+ default SparkEndpointBuilder urlDecodeHeaders(boolean urlDecodeHeaders) {
+ doSetProperty("urlDecodeHeaders", urlDecodeHeaders);
+ return this;
+ }
+ /**
+ * If this option is enabled, then during binding from Spark to Camel
+ * Message the header values will be URL decoded (eg %20 will be a
+ * space character).
+ *
+ * The option will be converted to a <code>boolean</code> type.
+ *
+ * Group: consumer
+ */
+ default SparkEndpointBuilder urlDecodeHeaders(String urlDecodeHeaders) {
+ doSetProperty("urlDecodeHeaders", urlDecodeHeaders);
return this;
}
}
/**
- * Advanced builder for endpoint for the Spark component.
+ * Advanced builder for endpoint for the Spark Rest component.
*/
public interface AdvancedSparkEndpointBuilder
extends
- EndpointProducerBuilder {
+ EndpointConsumerBuilder {
default SparkEndpointBuilder basic() {
return (SparkEndpointBuilder) this;
}
/**
+ * To let the consumer use a custom ExceptionHandler. Notice if the
+ * option bridgeErrorHandler is enabled then this option is not in use.
+ * By default the consumer will deal with exceptions, that will be
+ * logged at WARN or ERROR level and ignored.
+ *
+ * The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
+ * type.
+ *
+ * Group: consumer (advanced)
+ */
+ default AdvancedSparkEndpointBuilder exceptionHandler(
+ ExceptionHandler exceptionHandler) {
+ doSetProperty("exceptionHandler", exceptionHandler);
+ return this;
+ }
+ /**
+ * To let the consumer use a custom ExceptionHandler. Notice if the
+ * option bridgeErrorHandler is enabled then this option is not in use.
+ * By default the consumer will deal with exceptions, that will be
+ * logged at WARN or ERROR level and ignored.
+ *
+ * The option will be converted to a
+ * <code>org.apache.camel.spi.ExceptionHandler</code> type.
+ *
+ * Group: consumer (advanced)
+ */
+ default AdvancedSparkEndpointBuilder exceptionHandler(
+ String exceptionHandler) {
+ doSetProperty("exceptionHandler", exceptionHandler);
+ return this;
+ }
+ /**
+ * Sets the exchange pattern when the consumer creates an exchange.
+ *
+ * The option is a: <code>org.apache.camel.ExchangePattern</code> type.
+ *
+ * Group: consumer (advanced)
+ */
+ default AdvancedSparkEndpointBuilder exchangePattern(
+ ExchangePattern exchangePattern) {
+ doSetProperty("exchangePattern", exchangePattern);
+ return this;
+ }
+ /**
+ * Sets the exchange pattern when the consumer creates an exchange.
+ *
+ * The option will be converted to a
+ * <code>org.apache.camel.ExchangePattern</code> type.
+ *
+ * Group: consumer (advanced)
+ */
+ default AdvancedSparkEndpointBuilder exchangePattern(
+ String exchangePattern) {
+ doSetProperty("exchangePattern", exchangePattern);
+ return this;
+ }
+ /**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
@@ -232,6 +317,56 @@
return this;
}
/**
+ * Whether or not the consumer should try to find a target consumer by
+ * matching the URI prefix if no exact match is found.
+ *
+ * The option is a: <code>boolean</code> type.
+ *
+ * Group: advanced
+ */
+ default AdvancedSparkEndpointBuilder matchOnUriPrefix(
+ boolean matchOnUriPrefix) {
+ doSetProperty("matchOnUriPrefix", matchOnUriPrefix);
+ return this;
+ }
+ /**
+ * Whether or not the consumer should try to find a target consumer by
+ * matching the URI prefix if no exact match is found.
+ *
+ * The option will be converted to a <code>boolean</code> type.
+ *
+ * Group: advanced
+ */
+ default AdvancedSparkEndpointBuilder matchOnUriPrefix(
+ String matchOnUriPrefix) {
+ doSetProperty("matchOnUriPrefix", matchOnUriPrefix);
+ return this;
+ }
+ /**
+ * To use a custom SparkBinding to map to/from Camel message.
+ *
+ * The option is a:
+ * <code>org.apache.camel.component.sparkrest.SparkBinding</code> type.
+ *
+ * Group: advanced
+ */
+ default AdvancedSparkEndpointBuilder sparkBinding(Object sparkBinding) {
+ doSetProperty("sparkBinding", sparkBinding);
+ return this;
+ }
+ /**
+ * To use a custom SparkBinding to map to/from Camel message.
+ *
+ * The option will be converted to a
+ * <code>org.apache.camel.component.sparkrest.SparkBinding</code> type.
+ *
+ * Group: advanced
+ */
+ default AdvancedSparkEndpointBuilder sparkBinding(String sparkBinding) {
+ doSetProperty("sparkBinding", sparkBinding);
+ return this;
+ }
+ /**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
@@ -257,24 +392,28 @@
}
}
/**
- * Spark (camel-spark)
- * The spark component can be used to send RDD or DataFrame jobs to Apache
- * Spark cluster.
+ * Spark Rest (camel-spark-rest)
+ * The spark-rest component is used for hosting REST services which have
+ * been defined using Camel rest-dsl.
*
- * Category: bigdata,iot
- * Since: 2.17
- * Maven coordinates: org.apache.camel:camel-spark
+ * Category: rest
+ * Since: 2.14
+ * Maven coordinates: org.apache.camel:camel-spark-rest
*
- * Syntax: <code>spark:endpointType</code>
+ * Syntax: <code>spark-rest:verb:path</code>
*
- * Path parameter: endpointType (required)
- * Type of the endpoint (rdd, dataframe, hive).
- * The value can be one of: rdd, dataframe, hive
+ * Path parameter: verb (required)
+ * get, post, put, patch, delete, head, trace, connect, or options.
+ * The value can be one of: get, post, put, patch, delete, head, trace,
+ * connect, options
+ *
+ * Path parameter: path (required)
+ * The content path which supports Spark syntax.
*/
- default SparkEndpointBuilder spark(String path) {
+ default SparkEndpointBuilder sparkRest(String path) {
class SparkEndpointBuilderImpl extends AbstractEndpointBuilder implements SparkEndpointBuilder, AdvancedSparkEndpointBuilder {
public SparkEndpointBuilderImpl(String path) {
- super("spark", path);
+ super("spark-rest", path);
}
}
return new SparkEndpointBuilderImpl(path);