Fix #385 - Introduce Knative deployments for workflows  (#447)

diff --git a/api/metadata/annotations.go b/api/metadata/annotations.go
index a0980ab..a8f5609 100644
--- a/api/metadata/annotations.go
+++ b/api/metadata/annotations.go
@@ -73,3 +73,31 @@
 	// Ideally used in production use cases
 	GitOpsProfile ProfileType = "gitops"
 )
+
+const (
+	DefaultProfile = PreviewProfile
+)
+
+// supportedProfiles holds the profiles accepted by the operator.
+// The prod profile is deprecated and no longer supported; use the preview profile instead.
+var supportedProfiles = map[ProfileType]ProfileType{DevProfile: DevProfile, PreviewProfile: PreviewProfile, GitOpsProfile: GitOpsProfile}
+
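+// GetProfileOrDefault returns the profile set in the given annotations,
+// falling back to DefaultProfile when the annotation is absent or unsupported.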
+func GetProfileOrDefault(annotation map[string]string) ProfileType {
+	if annotation == nil {
+		return DefaultProfile
+	}
+	profile, ok := supportedProfiles[ProfileType(annotation[Profile])]
+	if !ok {
+		return DefaultProfile
+	}
+	return profile
+}
+
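+// IsDevProfile reports whether the dev profile is explicitly set in the given annotations.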
+func IsDevProfile(annotation map[string]string) bool {
+	if annotation == nil {
+		return false
+	}
+	if len(annotation[Profile]) == 0 {
+		return false
+	}
+	return ProfileType(annotation[Profile]) == DevProfile
+}
diff --git a/api/metadata/annotations_test.go b/api/metadata/annotations_test.go
new file mode 100644
index 0000000..b0437bb
--- /dev/null
+++ b/api/metadata/annotations_test.go
@@ -0,0 +1,43 @@
+// Copyright 2024 Apache Software Foundation (ASF)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+	"testing"
+)
+
+func TestGetProfile(t *testing.T) {
+	type args struct {
+		annotation map[string]string
+	}
+	tests := []struct {
+		name string
+		args args
+		want ProfileType
+	}{
+		{"Empty Annotations", args{annotation: nil}, DefaultProfile},
+		{"Non-existent Profile", args{annotation: map[string]string{Profile: "IDontExist"}}, DefaultProfile},
+		{"Regular Annotation", args{annotation: map[string]string{Profile: GitOpsProfile.String()}}, GitOpsProfile},
+		{"Deprecated Annotation", args{annotation: map[string]string{Profile: ProdProfile.String()}}, DefaultProfile},
+		{"Dev Annotation", args{annotation: map[string]string{Profile: DevProfile.String()}}, DevProfile},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetProfileOrDefault(tt.args.annotation); got != tt.want {
+				t.Errorf("GetProfileOrDefault() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/api/v1alpha08/podtemplate_types.go b/api/v1alpha08/podtemplate_types.go
new file mode 100644
index 0000000..3811a1b
--- /dev/null
+++ b/api/v1alpha08/podtemplate_types.go
@@ -0,0 +1,545 @@
+// Copyright 2024 Apache Software Foundation (ASF)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha08
+
+import corev1 "k8s.io/api/core/v1"
+
+// ContainerSpec is the container for the internal deployments based on the default Kubernetes Container API
+type ContainerSpec struct {
+	// Container image name.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	// This field is optional to allow higher level config management to default or override
+	// container images in workload controllers like Deployments and StatefulSets.
+	// +optional
+	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+	// Entrypoint array. Not executed within a shell.
+	// The container image's ENTRYPOINT is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
+	// Arguments to the entrypoint.
+	// The container image's CMD is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
+	// List of ports to expose from the container. Not specifying a port here
+	// DOES NOT prevent that port from being exposed. Any port which is
+	// listening on the default "0.0.0.0" address inside a container will be
+	// accessible from the network.
+	// Modifying this array with strategic merge patch may corrupt the data.
+	// For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=containerPort
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=containerPort
+	// +listMapKey=protocol
+	Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
+	// List of sources to populate environment variables in the container.
+	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
+	// List of environment variables to set in the container.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
+	// Compute Resources required by this container.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+	// Resources resize policy for the container.
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	// +listType=atomic
+	ResizePolicy []corev1.ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
+	// Pod volumes to mount into the container's filesystem.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=mountPath
+	// +patchStrategy=merge
+	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
+	// volumeDevices is the list of block devices to be used by the container.
+	// +patchMergeKey=devicePath
+	// +patchStrategy=merge
+	// +optional
+	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
+	// Periodic probe of container liveness.
+	// Container will be restarted if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
+	// Periodic probe of container service readiness.
+	// Container will be removed from service endpoints if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
+	// StartupProbe indicates that the Pod has successfully initialized.
+	// If specified, no other probes are executed until this completes successfully.
+	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+	// when it might take a long time to load data or warm a cache, than during steady-state operation.
+	// This cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
+	// Actions that the management system should take in response to container lifecycle events.
+	// Cannot be updated.
+	// +optional
+	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
+	// Optional: Path at which the file to which the container's termination message
+	// will be written is mounted into the container's filesystem.
+	// Message written is intended to be brief final status, such as an assertion failure message.
+	// Will be truncated by the node if greater than 4096 bytes. The total message length across
+	// all containers will be limited to 12kb.
+	// Defaults to /dev/termination-log.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
+	// Indicate how the termination message should be populated. File will use the contents of
+	// terminationMessagePath to populate the container status message on both success and failure.
+	// FallbackToLogsOnError will use the last chunk of container log output if the termination
+	// message file is empty and the container exited with an error.
+	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+	// Defaults to File.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
+	// Image pull policy.
+	// One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +optional
+	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
+	// SecurityContext defines the security options the container should be run with.
+	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+	// +optional
+	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
+
+	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+	// and shouldn't be used for general purpose containers.
+
+	// Whether this container should allocate a buffer for stdin in the container runtime. If this
+	// is not set, reads from stdin in the container will always result in EOF.
+	// Default is false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
+	// Whether the container runtime should close the stdin channel after it has been opened by
+	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
+	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+	// at which time stdin is closed and remains closed until the container is restarted. If this
+	// flag is false, a container process that reads from stdin will never receive an EOF.
+	// Default is false
+	// +optional
+	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
+	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+	// Default is false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
+}
+
+// ToContainer converts to Kubernetes Container API.
+func (f *ContainerSpec) ToContainer() corev1.Container {
+	return corev1.Container{
+		Name:                     DefaultContainerName,
+		Image:                    f.Image,
+		Command:                  f.Command,
+		Args:                     f.Args,
+		Ports:                    f.Ports,
+		EnvFrom:                  f.EnvFrom,
+		Env:                      f.Env,
+		Resources:                f.Resources,
+		ResizePolicy:             f.ResizePolicy,
+		VolumeMounts:             f.VolumeMounts,
+		VolumeDevices:            f.VolumeDevices,
+		LivenessProbe:            f.LivenessProbe,
+		ReadinessProbe:           f.ReadinessProbe,
+		StartupProbe:             f.StartupProbe,
+		Lifecycle:                f.Lifecycle,
+		TerminationMessagePath:   f.TerminationMessagePath,
+		TerminationMessagePolicy: f.TerminationMessagePolicy,
+		ImagePullPolicy:          f.ImagePullPolicy,
+		SecurityContext:          f.SecurityContext,
+		Stdin:                    f.Stdin,
+		StdinOnce:                f.StdinOnce,
+		TTY:                      f.TTY,
+	}
+}
+
+// PodSpec describes the PodSpec for the internal deployments based on the default Kubernetes PodSpec API
+type PodSpec struct {
+	// List of volumes that can be mounted by containers belonging to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+	// List of initialization containers belonging to the pod.
+	// Init containers are executed in order prior to containers being started. If any
+	// init container fails, the pod is considered to have failed and is handled according
+	// to its restartPolicy. The name for an init container or normal container must be
+	// unique among all containers.
+	// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+	// The resourceRequirements of an init container are taken into account during scheduling
+	// by finding the highest request/limit for each resource type, and then using the max of
+	// that value or the sum of the normal containers. Limits are applied to init containers
+	// in a similar fashion.
+	// Init containers cannot currently be added or removed.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	InitContainers []corev1.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
+	// List of containers belonging to the pod.
+	// Containers cannot currently be added or removed.
+	// There must be at least one container in a Pod.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Containers []corev1.Container `json:"containers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
+	// Restart policy for all containers within the pod.
+	// One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+	// Default to Always.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+	// +optional
+	RestartPolicy corev1.RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
+	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+	// Value must be non-negative integer. The value zero indicates stop immediately via
+	// the kill signal (no opportunity to shut down).
+	// If this value is nil, the default grace period will be used instead.
+	// The grace period is the duration in seconds after the processes running in the pod are sent
+	// a termination signal and the time when the processes are forcibly halted with a kill signal.
+	// Set this value longer than the expected cleanup time for your process.
+	// Defaults to 30 seconds.
+	// +optional
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
+	// Optional duration in seconds the pod may be active on the node relative to
+	// StartTime before the system will actively try to mark it failed and kill associated containers.
+	// Value must be a positive integer.
+	// +optional
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
+	// Set DNS policy for the pod.
+	// Defaults to "ClusterFirst".
+	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+	// To have DNS options set along with hostNetwork, you have to specify DNS policy
+	// explicitly to 'ClusterFirstWithHostNet'.
+	// +optional
+	DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	// +mapType=atomic
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
+
+	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
+	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
+
+	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
+	// requirements.
+	// +optional
+	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
+	// Host networking requested for this pod. Use the host's network namespace.
+	// If this option is set, the ports that will be used must be specified.
+	// Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
+	// Use the host's pid namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
+	// Use the host's ipc namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
+	// Share a single process namespace between all of the containers in a pod.
+	// When this is set containers will be able to view and signal processes from other containers
+	// in the same pod, and the first process in each container will not be assigned PID 1.
+	// HostPID and ShareProcessNamespace cannot both be set.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// Optional: Defaults to empty.  See type description for default values of each field.
+	// +optional
+	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
+	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+	// If specified, these secrets will be passed to individual puller implementations for them to use.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
+	// Specifies the hostname of the Pod
+	// If not specified, the pod's hostname will be set to a system-defined value.
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
+	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+	// If not specified, the pod will not have a domainname at all.
+	// +optional
+	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
+	// If specified, the pod will be dispatched by specified scheduler.
+	// If not specified, the pod will be dispatched by default scheduler.
+	// +optional
+	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
+	// If specified, the pod's tolerations.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
+	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+	// file if specified. This is only valid for non-hostNetwork pods.
+	// +optional
+	// +patchMergeKey=ip
+	// +patchStrategy=merge
+	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
+	// If specified, indicates the pod's priority. "system-node-critical" and
+	// "system-cluster-critical" are two special keywords which indicate the
+	// highest priorities with the former being the highest priority. Any other
+	// name must be defined by creating a PriorityClass object with that name.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
+	// The priority value. Various system components use this field to find the
+	// priority of the pod. When Priority Admission Controller is enabled, it
+	// prevents users from setting this field. The admission controller populates
+	// this field from PriorityClassName.
+	// The higher the value, the higher the priority.
+	// +optional
+	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
+	// Specifies the DNS parameters of a pod.
+	// Parameters specified here will be merged to the generated DNS
+	// configuration based on DNSPolicy.
+	// +optional
+	DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
+	// If specified, all readiness gates will be evaluated for pod readiness.
+	// A pod is ready when all its containers are ready AND
+	// all conditions specified in the readiness gates have status equal to "True"
+	// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+	// +optional
+	ReadinessGates []corev1.PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
+	// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+	// to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run.
+	// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+	// empty definition that uses the default runtime handler.
+	// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+	// +optional
+	RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
+	// EnableServiceLinks indicates whether information about services should be injected into pod's
+	// environment variables, matching the syntax of Docker links.
+	// Optional: Defaults to true.
+	// +optional
+	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
+	// PreemptionPolicy is the Policy for preempting pods with lower priority.
+	// One of Never, PreemptLowerPriority.
+	// Defaults to PreemptLowerPriority if unset.
+	// +optional
+	PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"`
+	// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+	// This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+	// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+	// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+	// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+	// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+	// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+	// +optional
+	Overhead corev1.ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
+	// TopologySpreadConstraints describes how a group of pods ought to spread across topology
+	// domains. Scheduler will schedule pods in a way which abides by the constraints.
+	// All topologySpreadConstraints are ANDed.
+	// +optional
+	// +patchMergeKey=topologyKey
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=topologyKey
+	// +listMapKey=whenUnsatisfiable
+	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"`
+	// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+	// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+	// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+	// If a pod does not have FQDN, this has no effect.
+	// Default to false.
+	// +optional
+	SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"`
+	// Specifies the OS of the containers in the pod.
+	// Some pod and container fields are restricted if this is set.
+	//
+	// If the OS field is set to linux, the following fields must be unset:
+	// - securityContext.windowsOptions
+	//
+	// If the OS field is set to windows, following fields must be unset:
+	// - spec.hostPID
+	// - spec.hostIPC
+	// - spec.hostUsers
+	// - spec.securityContext.seLinuxOptions
+	// - spec.securityContext.seccompProfile
+	// - spec.securityContext.fsGroup
+	// - spec.securityContext.fsGroupChangePolicy
+	// - spec.securityContext.sysctls
+	// - spec.shareProcessNamespace
+	// - spec.securityContext.runAsUser
+	// - spec.securityContext.runAsGroup
+	// - spec.securityContext.supplementalGroups
+	// - spec.containers[*].securityContext.seLinuxOptions
+	// - spec.containers[*].securityContext.seccompProfile
+	// - spec.containers[*].securityContext.capabilities
+	// - spec.containers[*].securityContext.readOnlyRootFilesystem
+	// - spec.containers[*].securityContext.privileged
+	// - spec.containers[*].securityContext.allowPrivilegeEscalation
+	// - spec.containers[*].securityContext.procMount
+	// - spec.containers[*].securityContext.runAsUser
+	// - spec.containers[*].securityContext.runAsGroup
+	// +optional
+	OS *corev1.PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"`
+
+	// Use the host's user namespace.
+	// Optional: Default to true.
+	// If set to true or not present, the pod will be run in the host user namespace, useful
+	// for when the pod needs a feature only available to the host user namespace, such as
+	// loading a kernel module with CAP_SYS_MODULE.
+	// When set to false, a new userns is created for the pod. Setting false is useful for
+	// mitigating container breakout vulnerabilities even allowing users to run their
+	// containers as root without actually having root privileges on the host.
+	// This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"`
+
+	// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+	// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+	// scheduler will not attempt to schedule the pod.
+	//
+	// SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+	//
+	// This is a beta feature enabled by the PodSchedulingReadiness feature gate.
+	//
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	// +featureGate=PodSchedulingReadiness
+	// +optional
+	SchedulingGates []corev1.PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"`
+	// ResourceClaims defines which ResourceClaims must be allocated
+	// and reserved before the Pod is allowed to start. The resources
+	// will be made available to those containers which consume them
+	// by name.
+	//
+	// This is an alpha field and requires enabling the
+	// DynamicResourceAllocation feature gate.
+	//
+	// This field is immutable.
+	//
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	// +listType=map
+	// +listMapKey=name
+	// +featureGate=DynamicResourceAllocation
+	// +optional
+	ResourceClaims []corev1.PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
+}
+
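+// ToPodSpec converts to the Kubernetes PodSpec API.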
+func (f *PodSpec) ToPodSpec() corev1.PodSpec {
+	return corev1.PodSpec{
+		Volumes:                       f.Volumes,
+		InitContainers:                f.InitContainers,
+		Containers:                    f.Containers,
+		RestartPolicy:                 f.RestartPolicy,
+		TerminationGracePeriodSeconds: f.TerminationGracePeriodSeconds,
+		ActiveDeadlineSeconds:         f.ActiveDeadlineSeconds,
+		DNSPolicy:                     f.DNSPolicy,
+		NodeSelector:                  f.NodeSelector,
+		ServiceAccountName:            f.ServiceAccountName,
+		AutomountServiceAccountToken:  f.AutomountServiceAccountToken,
+		NodeName:                      f.NodeName,
+		HostNetwork:                   f.HostNetwork,
+		HostPID:                       f.HostPID,
+		HostIPC:                       f.HostIPC,
+		ShareProcessNamespace:         f.ShareProcessNamespace,
+		SecurityContext:               f.SecurityContext,
+		ImagePullSecrets:              f.ImagePullSecrets,
+		Hostname:                      f.Hostname,
+		Subdomain:                     f.Subdomain,
+		Affinity:                      f.Affinity,
+		SchedulerName:                 f.SchedulerName,
+		Tolerations:                   f.Tolerations,
+		HostAliases:                   f.HostAliases,
+		PriorityClassName:             f.PriorityClassName,
+		Priority:                      f.Priority,
+		DNSConfig:                     f.DNSConfig,
+		ReadinessGates:                f.ReadinessGates,
+		RuntimeClassName:              f.RuntimeClassName,
+		EnableServiceLinks:            f.EnableServiceLinks,
+		PreemptionPolicy:              f.PreemptionPolicy,
+		Overhead:                      f.Overhead,
+		TopologySpreadConstraints:     f.TopologySpreadConstraints,
+		SetHostnameAsFQDN:             f.SetHostnameAsFQDN,
+		OS:                            f.OS,
+		HostUsers:                     f.HostUsers,
+		SchedulingGates:               f.SchedulingGates,
+		ResourceClaims:                f.ResourceClaims,
+	}
+}
+
+// PodTemplateSpec describes the desired custom Kubernetes PodTemplate definition for the deployed flow or service.
+//
+// The ContainerSpec describes the container where the actual flow or service is running. It will override any default definitions.
+// For example, to override the image one can use `.spec.podTemplate.container.image = my/image:tag`.
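+//
+// A minimal YAML sketch of that override (a non-authoritative example; field names are taken
+// from this type's JSON tags and the `spec.podTemplate` path from the example above):
+//
+//	podTemplate:
+//	  replicas: 1
+//	  container:
+//	    image: my/image:tag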
+type PodTemplateSpec struct {
+	// Container is the Kubernetes container where the application should run.
+	// One can change this attribute in order to override the defaults provided by the operator.
+	// +optional
+	Container ContainerSpec `json:"container,omitempty"`
+	// +optional
+	PodSpec `json:",inline"`
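+	// Replicas is the number of pod replicas to create for the deployed flow or service.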
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+}
diff --git a/api/v1alpha08/sonataflow_types.go b/api/v1alpha08/sonataflow_types.go
index 3237fc1..7763dc2 100644
--- a/api/v1alpha08/sonataflow_types.go
+++ b/api/v1alpha08/sonataflow_types.go
@@ -31,524 +31,19 @@
 
 const DefaultContainerName = "workflow"
 
-// ContainerSpec is the container for the internal deployments based on the default Kubernetes Container API
-type ContainerSpec struct {
-	// Container image name.
-	// More info: https://kubernetes.io/docs/concepts/containers/images
-	// This field is optional to allow higher level config management to default or override
-	// container images in workload controllers like Deployments and StatefulSets.
-	// +optional
-	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
-	// Entrypoint array. Not executed within a shell.
-	// The container image's ENTRYPOINT is used if this is not provided.
-	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
-	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
-	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
-	// of whether the variable exists or not. Cannot be updated.
-	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
-	// +optional
-	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
-	// Arguments to the entrypoint.
-	// The container image's CMD is used if this is not provided.
-	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
-	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
-	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
-	// of whether the variable exists or not. Cannot be updated.
-	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
-	// +optional
-	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
-	// List of ports to expose from the container. Not specifying a port here
-	// DOES NOT prevent that port from being exposed. Any port which is
-	// listening on the default "0.0.0.0" address inside a container will be
-	// accessible from the network.
-	// Modifying this array with strategic merge patch may corrupt the data.
-	// For more information See https://github.com/kubernetes/kubernetes/issues/108255.
-	// Cannot be updated.
-	// +optional
-	// +patchMergeKey=containerPort
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=containerPort
-	// +listMapKey=protocol
-	Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
-	// List of sources to populate environment variables in the container.
-	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
-	// will be reported as an event when the container is starting. When a key exists in multiple
-	// sources, the value associated with the last source will take precedence.
-	// Values defined by an Env with a duplicate key will take precedence.
-	// Cannot be updated.
-	// +optional
-	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
-	// List of environment variables to set in the container.
-	// Cannot be updated.
-	// +optional
-	// +patchMergeKey=name
-	// +patchStrategy=merge
-	Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
-	// Compute Resources required by this container.
-	// Cannot be updated.
-	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-	// +optional
-	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
-	// Resources resize policy for the container.
-	// +featureGate=InPlacePodVerticalScaling
-	// +optional
-	// +listType=atomic
-	ResizePolicy []corev1.ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
-	// Pod volumes to mount into the container's filesystem.
-	// Cannot be updated.
-	// +optional
-	// +patchMergeKey=mountPath
-	// +patchStrategy=merge
-	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
-	// volumeDevices is the list of block devices to be used by the container.
-	// +patchMergeKey=devicePath
-	// +patchStrategy=merge
-	// +optional
-	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
-	// Periodic probe of container liveness.
-	// Container will be restarted if the probe fails.
-	// Cannot be updated.
-	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
-	// +optional
-	LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
-	// Periodic probe of container service readiness.
-	// Container will be removed from service endpoints if the probe fails.
-	// Cannot be updated.
-	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
-	// +optional
-	ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
-	// StartupProbe indicates that the Pod has successfully initialized.
-	// If specified, no other probes are executed until this completes successfully.
-	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
-	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
-	// when it might take a long time to load data or warm a cache, than during steady-state operation.
-	// This cannot be updated.
-	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
-	// +optional
-	StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
-	// Actions that the management system should take in response to container lifecycle events.
-	// Cannot be updated.
-	// +optional
-	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
-	// Optional: Path at which the file to which the container's termination message
-	// will be written is mounted into the container's filesystem.
-	// Message written is intended to be brief final status, such as an assertion failure message.
-	// Will be truncated by the node if greater than 4096 bytes. The total message length across
-	// all containers will be limited to 12kb.
-	// Defaults to /dev/termination-log.
-	// Cannot be updated.
-	// +optional
-	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
-	// Indicate how the termination message should be populated. File will use the contents of
-	// terminationMessagePath to populate the container status message on both success and failure.
-	// FallbackToLogsOnError will use the last chunk of container log output if the termination
-	// message file is empty and the container exited with an error.
-	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
-	// Defaults to File.
-	// Cannot be updated.
-	// +optional
-	TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
-	// Image pull policy.
-	// One of Always, Never, IfNotPresent.
-	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
-	// Cannot be updated.
-	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
-	// +optional
-	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
-	// SecurityContext defines the security options the container should be run with.
-	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
-	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-	// +optional
-	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
+// DeploymentModel defines how a given pod will be deployed
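+// ("kubernetes" produces a regular Deployment, "knative" a Knative Serving Service). As a purely
+// illustrative, hypothetical snippet (the spec field name is assumed, not defined in this hunk),
+// a workflow selecting Knative would set `deploymentModel: knative`.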
+// +kubebuilder:validation:Enum=kubernetes;knative
+type DeploymentModel string
 
-	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
-	// and shouldn't be used for general purpose containers.
+const (
+	// KubernetesDeploymentModel defines a PodSpec to be deployed as a regular Kubernetes Deployment
+	KubernetesDeploymentModel DeploymentModel = "kubernetes"
+	// KnativeDeploymentModel defines a PodSpec to be deployed as a Knative Serving Service
+	KnativeDeploymentModel DeploymentModel = "knative"
+)
 
-	// Whether this container should allocate a buffer for stdin in the container runtime. If this
-	// is not set, reads from stdin in the container will always result in EOF.
-	// Default is false.
-	// +optional
-	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
-	// Whether the container runtime should close the stdin channel after it has been opened by
-	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
-	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
-	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
-	// at which time stdin is closed and remains closed until the container is restarted. If this
-	// flag is false, a container processes that reads from stdin will never receive an EOF.
-	// Default is false
-	// +optional
-	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
-	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
-	// Default is false.
-	// +optional
-	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
-}
-
-// ToContainer converts to Kubernetes Container API.
-func (f *ContainerSpec) ToContainer() corev1.Container {
-	return corev1.Container{
-		Name:                     DefaultContainerName,
-		Image:                    f.Image,
-		Command:                  f.Command,
-		Args:                     f.Args,
-		Ports:                    f.Ports,
-		EnvFrom:                  f.EnvFrom,
-		Env:                      f.Env,
-		Resources:                f.Resources,
-		ResizePolicy:             f.ResizePolicy,
-		VolumeMounts:             f.VolumeMounts,
-		VolumeDevices:            f.VolumeDevices,
-		LivenessProbe:            f.LivenessProbe,
-		ReadinessProbe:           f.ReadinessProbe,
-		StartupProbe:             f.StartupProbe,
-		Lifecycle:                f.Lifecycle,
-		TerminationMessagePath:   f.TerminationMessagePath,
-		TerminationMessagePolicy: f.TerminationMessagePolicy,
-		ImagePullPolicy:          f.ImagePullPolicy,
-		SecurityContext:          f.SecurityContext,
-		Stdin:                    f.Stdin,
-		StdinOnce:                f.StdinOnce,
-		TTY:                      f.TTY,
-	}
-}
-
-// PodSpec describes the PodSpec for the internal deployments based on the default Kubernetes PodSpec API
-type PodSpec struct {
-	// List of volumes that can be mounted by containers belonging to the pod.
-	// More info: https://kubernetes.io/docs/concepts/storage/volumes
-	// +optional
-	// +patchMergeKey=name
-	// +patchStrategy=merge,retainKeys
-	Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
-	// List of initialization containers belonging to the pod.
-	// Init containers are executed in order prior to containers being started. If any
-	// init container fails, the pod is considered to have failed and is handled according
-	// to its restartPolicy. The name for an init container or normal container must be
-	// unique among all containers.
-	// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
-	// The resourceRequirements of an init container are taken into account during scheduling
-	// by finding the highest request/limit for each resource type, and then using the max of
-	// of that value or the sum of the normal containers. Limits are applied to init containers
-	// in a similar fashion.
-	// Init containers cannot currently be added or removed.
-	// Cannot be updated.
-	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-	// +patchMergeKey=name
-	// +patchStrategy=merge
-	InitContainers []corev1.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
-	// List of containers belonging to the pod.
-	// Containers cannot currently be added or removed.
-	// There must be at least one container in a Pod.
-	// Cannot be updated.
-	// +optional
-	// +patchMergeKey=name
-	// +patchStrategy=merge
-	Containers []corev1.Container `json:"containers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
-	// Restart policy for all containers within the pod.
-	// One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
-	// Default to Always.
-	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
-	// +optional
-	RestartPolicy corev1.RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
-	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
-	// Value must be non-negative integer. The value zero indicates stop immediately via
-	// the kill signal (no opportunity to shut down).
-	// If this value is nil, the default grace period will be used instead.
-	// The grace period is the duration in seconds after the processes running in the pod are sent
-	// a termination signal and the time when the processes are forcibly halted with a kill signal.
-	// Set this value longer than the expected cleanup time for your process.
-	// Defaults to 30 seconds.
-	// +optional
-	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
-	// Optional duration in seconds the pod may be active on the node relative to
-	// StartTime before the system will actively try to mark it failed and kill associated containers.
-	// Value must be a positive integer.
-	// +optional
-	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
-	// Set DNS policy for the pod.
-	// Defaults to "ClusterFirst".
-	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
-	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
-	// To have DNS options set along with hostNetwork, you have to specify DNS policy
-	// explicitly to 'ClusterFirstWithHostNet'.
-	// +optional
-	DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
-	// NodeSelector is a selector which must be true for the pod to fit on a node.
-	// Selector which must match a node's labels for the pod to be scheduled on that node.
-	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-	// +optional
-	// +mapType=atomic
-	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
-
-	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
-	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-	// +optional
-	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
-	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
-	// +optional
-	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
-
-	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
-	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
-	// requirements.
-	// +optional
-	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
-	// Host networking requested for this pod. Use the host's network namespace.
-	// If this option is set, the ports that will be used must be specified.
-	// Default to false.
-	// +k8s:conversion-gen=false
-	// +optional
-	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
-	// Use the host's pid namespace.
-	// Optional: Default to false.
-	// +k8s:conversion-gen=false
-	// +optional
-	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
-	// Use the host's ipc namespace.
-	// Optional: Default to false.
-	// +k8s:conversion-gen=false
-	// +optional
-	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
-	// Share a single process namespace between all of the containers in a pod.
-	// When this is set containers will be able to view and signal processes from other containers
-	// in the same pod, and the first process in each container will not be assigned PID 1.
-	// HostPID and ShareProcessNamespace cannot both be set.
-	// Optional: Default to false.
-	// +k8s:conversion-gen=false
-	// +optional
-	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
-	// SecurityContext holds pod-level security attributes and common container settings.
-	// Optional: Defaults to empty.  See type description for default values of each field.
-	// +optional
-	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
-	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
-	// If specified, these secrets will be passed to individual puller implementations for them to use.
-	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
-	// +optional
-	// +patchMergeKey=name
-	// +patchStrategy=merge
-	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
-	// Specifies the hostname of the Pod
-	// If not specified, the pod's hostname will be set to a system-defined value.
-	// +optional
-	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
-	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
-	// If not specified, the pod will not have a domainname at all.
-	// +optional
-	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
-	// If specified, the pod's scheduling constraints
-	// +optional
-	Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
-	// If specified, the pod will be dispatched by specified scheduler.
-	// If not specified, the pod will be dispatched by default scheduler.
-	// +optional
-	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
-	// If specified, the pod's tolerations.
-	// +optional
-	Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
-	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
-	// file if specified. This is only valid for non-hostNetwork pods.
-	// +optional
-	// +patchMergeKey=ip
-	// +patchStrategy=merge
-	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
-	// If specified, indicates the pod's priority. "system-node-critical" and
-	// "system-cluster-critical" are two special keywords which indicate the
-	// highest priorities with the former being the highest priority. Any other
-	// name must be defined by creating a PriorityClass object with that name.
-	// If not specified, the pod priority will be default or zero if there is no
-	// default.
-	// +optional
-	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
-	// The priority value. Various system components use this field to find the
-	// priority of the pod. When Priority Admission Controller is enabled, it
-	// prevents users from setting this field. The admission controller populates
-	// this field from PriorityClassName.
-	// The higher the value, the higher the priority.
-	// +optional
-	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
-	// Specifies the DNS parameters of a pod.
-	// Parameters specified here will be merged to the generated DNS
-	// configuration based on DNSPolicy.
-	// +optional
-	DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
-	// If specified, all readiness gates will be evaluated for pod readiness.
-	// A pod is ready when all its containers are ready AND
-	// all conditions specified in the readiness gates have status equal to "True"
-	// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
-	// +optional
-	ReadinessGates []corev1.PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
-	// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
-	// to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run.
-	// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
-	// empty definition that uses the default runtime handler.
-	// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
-	// +optional
-	RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
-	// EnableServiceLinks indicates whether information about services should be injected into pod's
-	// environment variables, matching the syntax of Docker links.
-	// Optional: Defaults to true.
-	// +optional
-	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
-	// PreemptionPolicy is the Policy for preempting pods with lower priority.
-	// One of Never, PreemptLowerPriority.
-	// Defaults to PreemptLowerPriority if unset.
-	// +optional
-	PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"`
-	// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
-	// This field will be autopopulated at admission time by the RuntimeClass admission controller. If
-	// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
-	// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
-	// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
-	// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
-	// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
-	// +optional
-	Overhead corev1.ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
-	// TopologySpreadConstraints describes how a group of pods ought to spread across topology
-	// domains. Scheduler will schedule pods in a way which abides by the constraints.
-	// All topologySpreadConstraints are ANDed.
-	// +optional
-	// +patchMergeKey=topologyKey
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=topologyKey
-	// +listMapKey=whenUnsatisfiable
-	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"`
-	// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
-	// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
-	// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
-	// If a pod does not have FQDN, this has no effect.
-	// Default to false.
-	// +optional
-	SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"`
-	// Specifies the OS of the containers in the pod.
-	// Some pod and container fields are restricted if this is set.
-	//
-	// If the OS field is set to linux, the following fields must be unset:
-	// -securityContext.windowsOptions
-	//
-	// If the OS field is set to windows, following fields must be unset:
-	// - spec.hostPID
-	// - spec.hostIPC
-	// - spec.hostUsers
-	// - spec.securityContext.seLinuxOptions
-	// - spec.securityContext.seccompProfile
-	// - spec.securityContext.fsGroup
-	// - spec.securityContext.fsGroupChangePolicy
-	// - spec.securityContext.sysctls
-	// - spec.shareProcessNamespace
-	// - spec.securityContext.runAsUser
-	// - spec.securityContext.runAsGroup
-	// - spec.securityContext.supplementalGroups
-	// - spec.containers[*].securityContext.seLinuxOptions
-	// - spec.containers[*].securityContext.seccompProfile
-	// - spec.containers[*].securityContext.capabilities
-	// - spec.containers[*].securityContext.readOnlyRootFilesystem
-	// - spec.containers[*].securityContext.privileged
-	// - spec.containers[*].securityContext.allowPrivilegeEscalation
-	// - spec.containers[*].securityContext.procMount
-	// - spec.containers[*].securityContext.runAsUser
-	// - spec.containers[*].securityContext.runAsGroup
-	// +optional
-	OS *corev1.PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"`
-
-	// Use the host's user namespace.
-	// Optional: Default to true.
-	// If set to true or not present, the pod will be run in the host user namespace, useful
-	// for when the pod needs a feature only available to the host user namespace, such as
-	// loading a kernel module with CAP_SYS_MODULE.
-	// When set to false, a new userns is created for the pod. Setting false is useful for
-	// mitigating container breakout vulnerabilities even allowing users to run their
-	// containers as root without actually having root privileges on the host.
-	// This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
-	// +k8s:conversion-gen=false
-	// +optional
-	HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"`
-
-	// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
-	// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
-	// scheduler will not attempt to schedule the pod.
-	//
-	// SchedulingGates can only be set at pod creation time, and be removed only afterwards.
-	//
-	// This is a beta feature enabled by the PodSchedulingReadiness feature gate.
-	//
-	// +patchMergeKey=name
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=name
-	// +featureGate=PodSchedulingReadiness
-	// +optional
-	SchedulingGates []corev1.PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"`
-	// ResourceClaims defines which ResourceClaims must be allocated
-	// and reserved before the Pod is allowed to start. The resources
-	// will be made available to those containers which consume them
-	// by name.
-	//
-	// This is an alpha field and requires enabling the
-	// DynamicResourceAllocation feature gate.
-	//
-	// This field is immutable.
-	//
-	// +patchMergeKey=name
-	// +patchStrategy=merge,retainKeys
-	// +listType=map
-	// +listMapKey=name
-	// +featureGate=DynamicResourceAllocation
-	// +optional
-	ResourceClaims []corev1.PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
-}
-
-func (f *PodSpec) ToPodSpec() corev1.PodSpec {
-	return corev1.PodSpec{
-		Volumes:                       f.Volumes,
-		InitContainers:                f.InitContainers,
-		Containers:                    f.Containers,
-		RestartPolicy:                 f.RestartPolicy,
-		TerminationGracePeriodSeconds: f.TerminationGracePeriodSeconds,
-		ActiveDeadlineSeconds:         f.ActiveDeadlineSeconds,
-		DNSPolicy:                     f.DNSPolicy,
-		NodeSelector:                  f.NodeSelector,
-		ServiceAccountName:            f.ServiceAccountName,
-		AutomountServiceAccountToken:  f.AutomountServiceAccountToken,
-		NodeName:                      f.NodeName,
-		HostNetwork:                   f.HostNetwork,
-		HostPID:                       f.HostPID,
-		HostIPC:                       f.HostIPC,
-		ShareProcessNamespace:         f.ShareProcessNamespace,
-		SecurityContext:               f.SecurityContext,
-		ImagePullSecrets:              f.ImagePullSecrets,
-		Hostname:                      f.Hostname,
-		Subdomain:                     f.Subdomain,
-		Affinity:                      f.Affinity,
-		SchedulerName:                 f.SchedulerName,
-		Tolerations:                   f.Tolerations,
-		HostAliases:                   f.HostAliases,
-		PriorityClassName:             f.PriorityClassName,
-		Priority:                      f.Priority,
-		DNSConfig:                     f.DNSConfig,
-		ReadinessGates:                f.ReadinessGates,
-		RuntimeClassName:              f.RuntimeClassName,
-		EnableServiceLinks:            f.EnableServiceLinks,
-		PreemptionPolicy:              f.PreemptionPolicy,
-		Overhead:                      f.Overhead,
-		TopologySpreadConstraints:     f.TopologySpreadConstraints,
-		SetHostnameAsFQDN:             f.SetHostnameAsFQDN,
-		OS:                            f.OS,
-		HostUsers:                     f.HostUsers,
-		SchedulingGates:               f.SchedulingGates,
-		ResourceClaims:                f.ResourceClaims,
-	}
-}
-
-// PodTemplateSpec describes the desired custom Kubernetes PodTemplate definition for the deployed flow or service.
-//
-// The ContainerSpec describes the container where the actual flow or service is running. It will override any default definitions.
-// For example, to override the image one can use `.spec.podTemplate.container.image = my/image:tag`.
-type PodTemplateSpec struct {
+// FlowPodTemplateSpec is a special PodTemplateSpec designed for SonataFlow deployments
+type FlowPodTemplateSpec struct {
 	// Container is the Kubernetes container where the application should run.
 	// One can change this attribute in order to override the defaults provided by the operator.
 	// +optional
@@ -556,7 +51,11 @@
 	// +optional
 	PodSpec `json:",inline"`
 	// +optional
+	// Replicas define the number of pods to start by default for this deployment model. Ignored in "knative" deployment model.
 	Replicas *int32 `json:"replicas,omitempty"`
+	// Defines the kind of deployment model for this pod spec. In dev profile, only "kubernetes" is valid.
+	// +optional
+	DeploymentModel DeploymentModel `json:"deploymentModel,omitempty"`
 }
 
 // Flow describes the contents of the Workflow definition following the CNCF Serverless Workflow Specification.
@@ -656,7 +155,7 @@
 	Resources WorkflowResources `json:"resources,omitempty"`
 	// PodTemplate describes the deployment details of this SonataFlow instance.
 	//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="podTemplate"
-	PodTemplate PodTemplateSpec `json:"podTemplate,omitempty"`
+	PodTemplate FlowPodTemplateSpec `json:"podTemplate,omitempty"`
 	// Persistence defines the database persistence configuration for the workflow
 	Persistence *PersistenceOptionsSpec `json:"persistence,omitempty"`
 	// Sink describes the sinkBinding details of this SonataFlow instance.
@@ -752,6 +251,7 @@
 // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=='Running')].reason`
 // +operator-sdk:csv:customresourcedefinitions:resources={{SonataFlowBuild,sonataflow.org/v1alpha08,"A SonataFlow Build"}}
 // +operator-sdk:csv:customresourcedefinitions:resources={{Deployment,apps/v1,"A Deployment for the Flow"}}
+// +operator-sdk:csv:customresourcedefinitions:resources={{Service,serving.knative.dev/v1,"A Knative Serving Service for the Flow"}}
 // +operator-sdk:csv:customresourcedefinitions:resources={{Service,v1,"A Service for the Flow"}}
 // +operator-sdk:csv:customresourcedefinitions:resources={{Route,route.openshift.io/v1,"An OpenShift Route for the Flow"}}
 // +operator-sdk:csv:customresourcedefinitions:resources={{ConfigMap,v1,"The ConfigMaps with Flow definition and additional configuration files"}}
@@ -763,6 +263,10 @@
 	Status SonataFlowStatus `json:"status,omitempty"`
 }
 
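+// IsKnativeDeployment returns true when the workflow is configured to use the "knative" deployment model.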
+func (s *SonataFlow) IsKnativeDeployment() bool {
+	return s.Spec.PodTemplate.DeploymentModel == KnativeDeploymentModel
+}
+
 func (s *SonataFlow) HasContainerSpecImage() bool {
 	return len(s.Spec.PodTemplate.Container.Image) > 0
 }
diff --git a/api/v1alpha08/zz_generated.deepcopy.go b/api/v1alpha08/zz_generated.deepcopy.go
index c970b10..645d2b6 100644
--- a/api/v1alpha08/zz_generated.deepcopy.go
+++ b/api/v1alpha08/zz_generated.deepcopy.go
@@ -321,6 +321,28 @@
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlowPodTemplateSpec) DeepCopyInto(out *FlowPodTemplateSpec) {
+	*out = *in
+	in.Container.DeepCopyInto(&out.Container)
+	in.PodSpec.DeepCopyInto(&out.PodSpec)
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowPodTemplateSpec.
+func (in *FlowPodTemplateSpec) DeepCopy() *FlowPodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FlowPodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PersistenceOptionsSpec) DeepCopyInto(out *PersistenceOptionsSpec) {
 	*out = *in
 	if in.PostgreSQL != nil {
diff --git a/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml b/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml
index b71c508..7e78264 100644
--- a/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml
@@ -289,6 +289,9 @@
         name: A Deployment for the Flow
         version: apps/v1
       - kind: Service
+        name: A Knative Serving Service for the Flow
+        version: serving.knative.dev/v1
+      - kind: Service
         name: A Service for the Flow
         version: v1
       - kind: SonataFlowBuild
@@ -438,6 +441,22 @@
           - update
           - watch
         - apiGroups:
+          - serving.knative.dev
+          resources:
+          - service
+          - services
+          - services/status
+          - services/finalizers
+          verbs:
+          - create
+          - delete
+          - deletecollection
+          - get
+          - list
+          - patch
+          - update
+          - watch
+        - apiGroups:
           - coordination.k8s.io
           resources:
           - leases
diff --git a/bundle/manifests/sonataflow.org_sonataflows.yaml b/bundle/manifests/sonataflow.org_sonataflows.yaml
index eaf80c9..6f2cb2e 100644
--- a/bundle/manifests/sonataflow.org_sonataflows.yaml
+++ b/bundle/manifests/sonataflow.org_sonataflows.yaml
@@ -5626,6 +5626,13 @@
                       - name
                       type: object
                     type: array
+                  deploymentModel:
+                    description: Defines the kind of deployment model for this pod
+                      spec. In dev profile, only "kubernetes" is valid.
+                    enum:
+                    - kubernetes
+                    - knative
+                    type: string
                   dnsConfig:
                     description: Specifies the DNS parameters of a pod. Parameters
                       specified here will be merged to the generated DNS configuration
@@ -7153,6 +7160,8 @@
                       type: object
                     type: array
                   replicas:
+                    description: Replicas define the number of pods to start by default
+                      for this deployment model. Ignored in "knative" deployment model.
                     format: int32
                     type: integer
                   resourceClaims:
diff --git a/config/crd/bases/sonataflow.org_sonataflows.yaml b/config/crd/bases/sonataflow.org_sonataflows.yaml
index 1557c3b..61002c3 100644
--- a/config/crd/bases/sonataflow.org_sonataflows.yaml
+++ b/config/crd/bases/sonataflow.org_sonataflows.yaml
@@ -5627,6 +5627,13 @@
                       - name
                       type: object
                     type: array
+                  deploymentModel:
+                    description: Defines the kind of deployment model for this pod
+                      spec. In dev profile, only "kubernetes" is valid.
+                    enum:
+                    - kubernetes
+                    - knative
+                    type: string
                   dnsConfig:
                     description: Specifies the DNS parameters of a pod. Parameters
                       specified here will be merged to the generated DNS configuration
@@ -7154,6 +7161,8 @@
                       type: object
                     type: array
                   replicas:
+                    description: Replicas define the number of pods to start by default
+                      for this deployment model. Ignored in "knative" deployment model.
                     format: int32
                     type: integer
                   resourceClaims:
diff --git a/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml b/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml
index c61183d..c6fdf4e 100644
--- a/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml
+++ b/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml
@@ -173,6 +173,9 @@
         name: A Deployment for the Flow
         version: apps/v1
       - kind: Service
+        name: A Knative Serving Service for the Flow
+        version: serving.knative.dev/v1
+      - kind: Service
         name: A Service for the Flow
         version: v1
       - kind: SonataFlowBuild
diff --git a/config/rbac/builder_role.yaml b/config/rbac/builder_role.yaml
index 70b2ab5..ffced49 100644
--- a/config/rbac/builder_role.yaml
+++ b/config/rbac/builder_role.yaml
@@ -66,33 +66,3 @@
     - patch
     - update
     - watch
-- apiGroups:
-    - eventing.knative.dev
-  resources:
-    - triggers
-    - triggers/status
-    - triggers/finalizers
-  verbs:
-    - create
-    - delete
-    - deletecollection
-    - get
-    - list
-    - patch
-    - update
-    - watch
-- apiGroups:
-    - sources.knative.dev
-  resources:
-    - sinkbindings
-    - sinkbindings/status
-    - sinkbindings/finalizers
-  verbs:
-    - create
-    - delete
-    - deletecollection
-    - get
-    - list
-    - patch
-    - update
-    - watch
\ No newline at end of file
diff --git a/config/rbac/knative_role.yaml b/config/rbac/knative_role.yaml
new file mode 100644
index 0000000..8dad941
--- /dev/null
+++ b/config/rbac/knative_role.yaml
@@ -0,0 +1,52 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: knative-manager-role
+rules:
+  - apiGroups:
+      - eventing.knative.dev
+    resources:
+      - triggers
+      - triggers/status
+      - triggers/finalizers
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - sources.knative.dev
+    resources:
+      - sinkbindings
+      - sinkbindings/status
+      - sinkbindings/finalizers
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - serving.knative.dev
+    resources:
+      - service
+      - services
+      - services/status
+      - services/finalizers
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
diff --git a/config/rbac/knative_role_binding.yaml b/config/rbac/knative_role_binding.yaml
new file mode 100644
index 0000000..cbab613
--- /dev/null
+++ b/config/rbac/knative_role_binding.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: knative-manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: knative-manager-role
+subjects:
+  - kind: ServiceAccount
+    name: controller-manager
+    namespace: system
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
index 2ffd5ef..9a097cc 100644
--- a/config/rbac/kustomization.yaml
+++ b/config/rbac/kustomization.yaml
@@ -17,6 +17,8 @@
 - operator_role_binding_leases.yaml
 - service_discovery_role.yaml
 - service_discovery_role_binding.yaml
+- knative_role.yaml
+- knative_role_binding.yaml
 # Comment the following 4 lines if you want to disable
 # the auth proxy (https://github.com/brancz/kube-rbac-proxy)
 # which protects your /metrics endpoint.
diff --git a/controllers/platform/k8s.go b/controllers/platform/k8s.go
index e4310de..62d8b36 100644
--- a/controllers/platform/k8s.go
+++ b/controllers/platform/k8s.go
@@ -115,7 +115,7 @@
 		LivenessProbe:   liveProbe,
 		Ports: []corev1.ContainerPort{
 			{
-				Name:          utils.HttpScheme,
+				Name:          utils.DefaultServicePortName,
 				ContainerPort: int32(constants.DefaultHTTPWorkflowPortInt),
 				Protocol:      corev1.ProtocolTCP,
 			},
@@ -199,7 +199,7 @@
 	dataSvcSpec := corev1.ServiceSpec{
 		Ports: []corev1.ServicePort{
 			{
-				Name:       utils.HttpScheme,
+				Name:       utils.DefaultServicePortName,
 				Protocol:   corev1.ProtocolTCP,
 				Port:       80,
 				TargetPort: variables.DefaultHTTPWorkflowPortIntStr,
diff --git a/controllers/profiles/common/deployment.go b/controllers/profiles/common/deployment_status_manager.go
similarity index 83%
rename from controllers/profiles/common/deployment.go
rename to controllers/profiles/common/deployment_status_manager.go
index 64b6776..6272d3f 100644
--- a/controllers/profiles/common/deployment.go
+++ b/controllers/profiles/common/deployment_status_manager.go
@@ -26,7 +26,9 @@
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -39,6 +41,8 @@
 
 var _ WorkflowDeploymentManager = &deploymentHandler{}
 
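+// knativeDeploymentSuffix is appended to the latest created revision name to resolve the Deployment that Knative Serving creates for that revision.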
+const knativeDeploymentSuffix = "-deployment"
+
 // WorkflowDeploymentManager interface to handle workflow deployment features.
 type WorkflowDeploymentManager interface {
 	// SyncDeploymentStatus updates the workflow status aligned with the deployment counterpart.
@@ -58,24 +62,42 @@
 	c client.Client
 }
 
-func (d *deploymentHandler) RolloutDeployment(ctx context.Context, workflow *operatorapi.SonataFlow) error {
-	deployment := &appsv1.Deployment{}
-	if err := d.c.Get(ctx, client.ObjectKeyFromObject(workflow), deployment); err != nil {
-		// Deployment not found, nothing to do.
-		if errors.IsNotFound(err) {
-			return nil
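+// getDeployment fetches the Deployment that backs the workflow. For Knative deployments it resolves the Deployment
+// created by Knative Serving for the latest created revision; it returns nil (without error) when the object is not found yet.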
+func (d *deploymentHandler) getDeployment(ctx context.Context, workflow *operatorapi.SonataFlow) (*appsv1.Deployment, error) {
+	deploymentName := workflow.Name
+	if workflow.IsKnativeDeployment() {
+		ksvc := &servingv1.Service{}
+		if err := d.c.Get(ctx, client.ObjectKeyFromObject(workflow), ksvc); err != nil {
+			if errors.IsNotFound(err) {
+				return nil, nil
+			}
+			return nil, err
 		}
+		deploymentName = ksvc.Status.LatestCreatedRevisionName + knativeDeploymentSuffix
+	}
+	deployment := &appsv1.Deployment{}
+	if err := d.c.Get(ctx, types.NamespacedName{Namespace: workflow.Namespace, Name: deploymentName}, deployment); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return deployment, nil
+}
+
+func (d *deploymentHandler) RolloutDeployment(ctx context.Context, workflow *operatorapi.SonataFlow) error {
+	deployment, err := d.getDeployment(ctx, workflow)
+	if err != nil || deployment == nil {
 		return err
 	}
-	if err := kubeutil.MarkDeploymentToRollout(deployment); err != nil {
+	if err = kubeutil.MarkDeploymentToRollout(deployment); err != nil {
 		return err
 	}
 	return d.c.Update(ctx, deployment)
 }
 
 func (d *deploymentHandler) SyncDeploymentStatus(ctx context.Context, workflow *operatorapi.SonataFlow) (ctrl.Result, error) {
-	deployment := &appsv1.Deployment{}
-	if err := d.c.Get(ctx, client.ObjectKeyFromObject(workflow), deployment); err != nil {
+	deployment, err := d.getDeployment(ctx, workflow)
+	if err != nil || deployment == nil {
 		// we should have the deployment by this time, so even if the error above is not found, we should halt.
 		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Couldn't find the workflow deployment")
 		return ctrl.Result{RequeueAfter: constants.RequeueAfterFailure}, err
diff --git a/controllers/profiles/common/knative.go b/controllers/profiles/common/knative_eventing.go
similarity index 100%
rename from controllers/profiles/common/knative.go
rename to controllers/profiles/common/knative_eventing.go
diff --git a/controllers/profiles/common/mutate_visitors.go b/controllers/profiles/common/mutate_visitors.go
index eab1944..426154e 100644
--- a/controllers/profiles/common/mutate_visitors.go
+++ b/controllers/profiles/common/mutate_visitors.go
@@ -27,6 +27,7 @@
 	"github.com/imdario/mergo"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
@@ -55,6 +56,25 @@
 	}
 }
 
+// ImageKServiceMutateVisitor is the equivalent of ImageDeploymentMutateVisitor for Knative Serving services
+func ImageKServiceMutateVisitor(workflow *operatorapi.SonataFlow, image string) MutateVisitor {
+	return func(object client.Object) controllerutil.MutateFn {
+		// noop since we already have an image in the flow container defined by the user.
+		if workflow.HasContainerSpecImage() {
+			return func() error {
+				return nil
+			}
+		}
+		return func() error {
+			ksvc := object.(*servingv1.Service)
+			_, idx := kubeutil.GetContainerByName(operatorapi.DefaultContainerName, &ksvc.Spec.Template.Spec.PodSpec)
+			ksvc.Spec.Template.Spec.Containers[idx].Image = image
+			ksvc.Spec.Template.Spec.Containers[idx].ImagePullPolicy = kubeutil.GetImagePullPolicy(image)
+			return nil
+		}
+	}
+}
+
 // DeploymentMutateVisitor guarantees the state of the default Deployment object
 func DeploymentMutateVisitor(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) MutateVisitor {
 	return func(object client.Object) controllerutil.MutateFn {
@@ -87,6 +107,36 @@
 	return mergo.Merge(&object.Spec.Template.Spec, original.Spec.Template.Spec, mergo.WithOverride)
 }
 
+// KServiceMutateVisitor guarantees the state of the default Knative Service object
+func KServiceMutateVisitor(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) MutateVisitor {
+	return func(object client.Object) controllerutil.MutateFn {
+		return func() error {
+			if kubeutil.IsObjectNew(object) {
+				return nil
+			}
+			original, err := KServiceCreator(workflow, plf)
+			if err != nil {
+				return err
+			}
+			return EnsureKService(original.(*servingv1.Service), object.(*servingv1.Service))
+		}
+	}
+}
+
+// EnsureKService ensures that the original Knative Service fields are immutable.
+func EnsureKService(original *servingv1.Service, object *servingv1.Service) error {
+	object.Labels = original.GetLabels()
+
+	// Clean up the volumes; they are inherited from the original, and additional ones are added by other visitors
+	object.Spec.Template.Spec.Volumes = nil
+	for i := range object.Spec.Template.Spec.Containers {
+		object.Spec.Template.Spec.Containers[i].VolumeMounts = nil
+	}
+
+	// we merge rather than replace so we don't keep changing the spec, since k8s sets default values on the podSpec
+	return mergo.Merge(&object.Spec.Template.Spec.PodSpec, original.Spec.Template.Spec.PodSpec, mergo.WithOverride)
+}
+
 func ServiceMutateVisitor(workflow *operatorapi.SonataFlow) MutateVisitor {
 	return func(object client.Object) controllerutil.MutateFn {
 		return func() error {
diff --git a/controllers/profiles/common/object_creators.go b/controllers/profiles/common/object_creators.go
index 8da5956..c1ac670 100644
--- a/controllers/profiles/common/object_creators.go
+++ b/controllers/profiles/common/object_creators.go
@@ -24,6 +24,7 @@
 	"strings"
 
 	"github.com/apache/incubator-kie-kogito-serverless-operator/controllers/workflowdef"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 
 	cncfmodel "github.com/serverlessworkflow/sdk-go/v2/model"
 
@@ -108,6 +109,41 @@
 	return deployment, nil
 }
 
+// KServiceCreator creates the default Knative Service object for SonataFlow instances. It's based on the default DeploymentCreator.
+func KServiceCreator(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) (client.Object, error) {
+	lbl := workflowproj.GetMergedLabels(workflow)
+	ksvc := &servingv1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      workflow.Name,
+			Namespace: workflow.Namespace,
+			Labels:    lbl,
+		},
+		Spec: servingv1.ServiceSpec{
+			ConfigurationSpec: servingv1.ConfigurationSpec{
+				Template: servingv1.RevisionTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: lbl,
+					},
+					Spec: servingv1.RevisionSpec{
+						PodSpec: corev1.PodSpec{},
+					},
+				},
+			},
+		},
+	}
+
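+	// merge the user-provided PodSpec from the workflow's podTemplate into the default Knative revision template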
+	if err := mergo.Merge(&ksvc.Spec.Template.Spec.PodSpec, workflow.Spec.PodTemplate.PodSpec.ToPodSpec(), mergo.WithOverride); err != nil {
+		return nil, err
+	}
+	flowContainer, err := defaultContainer(workflow, plf)
+	if err != nil {
+		return nil, err
+	}
+	kubeutil.AddOrReplaceContainer(operatorapi.DefaultContainerName, *flowContainer, &ksvc.Spec.Template.Spec.PodSpec)
+
+	return ksvc, nil
+}
+
 func getReplicasOrDefault(workflow *operatorapi.SonataFlow) *int32 {
 	var dReplicas int32 = 1
 	if workflow.Spec.PodTemplate.Replicas == nil {
@@ -119,7 +155,7 @@
 func defaultContainer(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) (*corev1.Container, error) {
 	defaultContainerPort := corev1.ContainerPort{
 		ContainerPort: variables.DefaultHTTPWorkflowPortIntStr.IntVal,
-		Name:          utils.HttpScheme,
+		Name:          utils.DefaultServicePortName,
 		Protocol:      corev1.ProtocolTCP,
 	}
 	defaultFlowContainer := &corev1.Container{
@@ -134,6 +170,7 @@
 				},
 			},
 			TimeoutSeconds: healthTimeoutSeconds,
+			PeriodSeconds:  healthStartedPeriodSeconds,
 		},
 		ReadinessProbe: &corev1.Probe{
 			ProbeHandler: corev1.ProbeHandler{
@@ -143,6 +180,7 @@
 				},
 			},
 			TimeoutSeconds: healthTimeoutSeconds,
+			PeriodSeconds:  healthStartedPeriodSeconds,
 		},
 		StartupProbe: &corev1.Probe{
 			ProbeHandler: corev1.ProbeHandler{
@@ -173,7 +211,7 @@
 	defaultFlowContainer.Name = operatorapi.DefaultContainerName
 	portIdx := -1
 	for i := range defaultFlowContainer.Ports {
-		if defaultFlowContainer.Ports[i].Name == utils.HttpScheme ||
+		if defaultFlowContainer.Ports[i].Name == utils.DefaultServicePortName ||
 			defaultFlowContainer.Ports[i].ContainerPort == variables.DefaultHTTPWorkflowPortIntStr.IntVal {
 			portIdx = i
 			break
diff --git a/controllers/profiles/common/object_creators_test.go b/controllers/profiles/common/object_creators_test.go
index 26b5499..8b42e5f 100644
--- a/controllers/profiles/common/object_creators_test.go
+++ b/controllers/profiles/common/object_creators_test.go
@@ -94,13 +94,13 @@
 
 func TestMergePodSpec(t *testing.T) {
 	workflow := test.GetBaseSonataFlow(t.Name())
-	workflow.Spec.PodTemplate = v1alpha08.PodTemplateSpec{
+	workflow.Spec.PodTemplate = v1alpha08.FlowPodTemplateSpec{
 		Container: v1alpha08.ContainerSpec{
 			// this one we can override
 			Image: "quay.io/example/my-workflow:1.0.0",
 			Ports: []corev1.ContainerPort{
 				// let's override a immutable attribute
-				{Name: utils.HttpScheme, ContainerPort: 9090},
+				{Name: utils.DefaultServicePortName, ContainerPort: 9090},
 			},
 			Env: []corev1.EnvVar{
 				// We should be able to override this too
@@ -147,7 +147,7 @@
 
 func TestMergePodSpec_OverrideContainers(t *testing.T) {
 	workflow := test.GetBaseSonataFlow(t.Name())
-	workflow.Spec.PodTemplate = v1alpha08.PodTemplateSpec{
+	workflow.Spec.PodTemplate = v1alpha08.FlowPodTemplateSpec{
 		PodSpec: v1alpha08.PodSpec{
 			// Try to override the workflow container via the podspec
 			Containers: []corev1.Container{
@@ -155,7 +155,7 @@
 					Name:  v1alpha08.DefaultContainerName,
 					Image: "quay.io/example/my-workflow:1.0.0",
 					Ports: []corev1.ContainerPort{
-						{Name: utils.HttpScheme, ContainerPort: 9090},
+						{Name: utils.DefaultServicePortName, ContainerPort: 9090},
 					},
 					Env: []corev1.EnvVar{
 						{Name: "ENV1", Value: "VALUE_CUSTOM"},
@@ -213,13 +213,13 @@
 func TestMergePodSpec_WithPostgreSQL_and_JDBC_URL_field(t *testing.T) {
 	workflow := test.GetBaseSonataFlow(t.Name())
 	workflow.Spec = v1alpha08.SonataFlowSpec{
-		PodTemplate: v1alpha08.PodTemplateSpec{
+		PodTemplate: v1alpha08.FlowPodTemplateSpec{
 			Container: v1alpha08.ContainerSpec{
 				// this one we can override
 				Image: "quay.io/example/my-workflow:1.0.0",
 				Ports: []corev1.ContainerPort{
 					// let's override a immutable attribute
-					{Name: utils.HttpScheme, ContainerPort: 9090},
+					{Name: utils.DefaultServicePortName, ContainerPort: 9090},
 				},
 				Env: []corev1.EnvVar{
 					// We should be able to override this too
@@ -321,7 +321,7 @@
 func TestMergePodSpec_OverrideContainers_WithPostgreSQL_In_Workflow_CR(t *testing.T) {
 	workflow := test.GetBaseSonataFlow(t.Name())
 	workflow.Spec = v1alpha08.SonataFlowSpec{
-		PodTemplate: v1alpha08.PodTemplateSpec{
+		PodTemplate: v1alpha08.FlowPodTemplateSpec{
 			PodSpec: v1alpha08.PodSpec{
 				// Try to override the workflow container via the podspec
 				Containers: []corev1.Container{
@@ -329,7 +329,7 @@
 						Name:  v1alpha08.DefaultContainerName,
 						Image: "quay.io/example/my-workflow:1.0.0",
 						Ports: []corev1.ContainerPort{
-							{Name: utils.HttpScheme, ContainerPort: 9090},
+							{Name: utils.DefaultServicePortName, ContainerPort: 9090},
 						},
 						Env: []corev1.EnvVar{
 							{Name: "ENV1", Value: "VALUE_CUSTOM"},
@@ -510,7 +510,7 @@
 	}
 	workflow := test.GetBaseSonataFlow(t.Name())
 	workflow.Spec = v1alpha08.SonataFlowSpec{
-		PodTemplate: v1alpha08.PodTemplateSpec{
+		PodTemplate: v1alpha08.FlowPodTemplateSpec{
 			PodSpec: v1alpha08.PodSpec{
 				// Try to override the workflow container via the podspec
 				Containers: []corev1.Container{
@@ -518,7 +518,7 @@
 						Name:  v1alpha08.DefaultContainerName,
 						Image: "quay.io/example/my-workflow:1.0.0",
 						Ports: []corev1.ContainerPort{
-							{Name: utils.HttpScheme, ContainerPort: 9090},
+							{Name: utils.DefaultServicePortName, ContainerPort: 9090},
 						},
 						Env: []corev1.EnvVar{
 							{Name: "ENV1", Value: "VALUE_CUSTOM"},
diff --git a/controllers/profiles/common/persistence/postgresql.go b/controllers/profiles/common/persistence/postgresql.go
index 4bb5f38..e88a342 100644
--- a/controllers/profiles/common/persistence/postgresql.go
+++ b/controllers/profiles/common/persistence/postgresql.go
@@ -25,20 +25,7 @@
 )
 
 const (
-	defaultDatabaseName  = "sonataflow"
-	timeoutSeconds       = 3
-	failureThreshold     = 5
-	initialPeriodSeconds = 15
-	initialDelaySeconds  = 10
-	successThreshold     = 1
-
-	postgreSQLCPULimit      = "500m"
-	postgreSQLMemoryLimit   = "256Mi"
-	postgreSQLMemoryRequest = "256Mi"
-	postgreSQLCPURequest    = "100m"
-
-	defaultPostgreSQLUsername  = "sonataflow"
-	defaultPostgresSQLPassword = "sonataflow"
+	defaultDatabaseName = "sonataflow"
 )
 
 func ConfigurePostgreSQLEnv(postgresql *operatorapi.PersistencePostgreSQL, databaseSchema, databaseNamespace string) []corev1.EnvVar {
diff --git a/controllers/profiles/dev/object_creators_dev.go b/controllers/profiles/dev/object_creators_dev.go
index 45f6926..0f6069f 100644
--- a/controllers/profiles/dev/object_creators_dev.go
+++ b/controllers/profiles/dev/object_creators_dev.go
@@ -90,8 +90,7 @@
 			if err != nil {
 				return err
 			}
-			common.EnsureDeployment(original.(*appsv1.Deployment), object.(*appsv1.Deployment))
-			return nil
+			return common.EnsureDeployment(original.(*appsv1.Deployment), object.(*appsv1.Deployment))
 		}
 	}
 }
diff --git a/controllers/profiles/factory/factory.go b/controllers/profiles/factory/factory.go
index d54b0df..511c6ca 100644
--- a/controllers/profiles/factory/factory.go
+++ b/controllers/profiles/factory/factory.go
@@ -34,10 +34,6 @@
 	"github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/dev"
 )
 
-const (
-	defaultProfile = metadata.PreviewProfile
-)
-
 type reconcilerBuilder func(client client.Client, cfg *rest.Config, recorder record.EventRecorder) profiles.ProfileReconciler
 
 var profileBuilders = map[metadata.ProfileType]reconcilerBuilder{
@@ -47,25 +43,22 @@
 }
 
 func profileBuilder(workflow *operatorapi.SonataFlow) reconcilerBuilder {
-	profile := workflow.Annotations[metadata.Profile]
-	if len(profile) == 0 {
-		profile = defaultProfile.String()
-	}
+	profile := metadata.GetProfileOrDefault(workflow.Annotations)
 	// keep backward compatibility
-	if profile == metadata.ProdProfile.String() {
+	if profile == metadata.ProdProfile {
 		klog.V(log.W).Infof("Profile %s is deprecated, please use '%s' instead.", metadata.ProdProfile, metadata.PreviewProfile)
-		profile = metadata.PreviewProfile.String()
+		profile = metadata.PreviewProfile
 	}
 	// Enforce GitOps profile if the .spec.podTemplate.container.image is set in the Preview profile.
-	if (profile == metadata.PreviewProfile.String() || profile == metadata.ProdProfile.String()) && workflow.HasContainerSpecImage() {
+	if (profile == metadata.PreviewProfile || profile == metadata.ProdProfile) && workflow.HasContainerSpecImage() {
 		workflow.Annotations[metadata.Profile] = metadata.GitOpsProfile.String()
 		return profileBuilders[metadata.GitOpsProfile]
 	}
-	if _, ok := profileBuilders[metadata.ProfileType(profile)]; !ok {
-		klog.V(log.W).Infof("Profile %s not supported, please use '%s' or '%s'. Falling back to %s", profile, metadata.PreviewProfile, metadata.DevProfile, defaultProfile)
-		return profileBuilders[defaultProfile]
+	if _, ok := profileBuilders[profile]; !ok {
+		klog.V(log.W).Infof("Profile %s not supported, please use '%s' or '%s'. Falling back to %s", profile, metadata.PreviewProfile, metadata.DevProfile, metadata.DefaultProfile)
+		return profileBuilders[metadata.DefaultProfile]
 	}
-	return profileBuilders[metadata.ProfileType(profile)]
+	return profileBuilders[profile]
 }
 
 // NewReconciler creates a new ProfileReconciler based on the given workflow and context.
diff --git a/controllers/profiles/gitops/profile_gitops_test.go b/controllers/profiles/gitops/profile_gitops_test.go
index 9287cbc..051ea57 100644
--- a/controllers/profiles/gitops/profile_gitops_test.go
+++ b/controllers/profiles/gitops/profile_gitops_test.go
@@ -29,7 +29,7 @@
 )
 
 func Test_Reconciler_ProdOps(t *testing.T) {
-	workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name())
+	workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name())
 	workflow.Spec.PodTemplate.PodSpec.InitContainers = append(workflow.Spec.PodTemplate.PodSpec.InitContainers, corev1.Container{
 		Name:    "check-postgres",
 		Image:   "registry.access.redhat.com/ubi9/ubi-micro:latest",
diff --git a/controllers/profiles/preview/deployment_handler.go b/controllers/profiles/preview/deployment_handler.go
index 5fce268..dfbac25 100644
--- a/controllers/profiles/preview/deployment_handler.go
+++ b/controllers/profiles/preview/deployment_handler.go
@@ -17,8 +17,8 @@
 import (
 	"context"
 
+	"github.com/apache/incubator-kie-kogito-serverless-operator/controllers/knative"
 	v1 "k8s.io/api/core/v1"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -44,62 +44,23 @@
 }
 
 func (d *DeploymentReconciler) Reconcile(ctx context.Context, workflow *operatorapi.SonataFlow) (reconcile.Result, []client.Object, error) {
-	return d.reconcileWithBuiltImage(ctx, workflow, "")
+	return d.reconcileWithImage(ctx, workflow, "")
 }
 
-func (d *DeploymentReconciler) reconcileWithBuiltImage(ctx context.Context, workflow *operatorapi.SonataFlow, image string) (reconcile.Result, []client.Object, error) {
-	pl, _ := platform.GetActivePlatform(ctx, d.C, workflow.Namespace)
-	userPropsCM, _, err := d.ensurers.userPropsConfigMap.Ensure(ctx, workflow)
-	if err != nil {
-		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the user properties config map")
-		_, err = d.PerformStatusUpdate(ctx, workflow)
-		return ctrl.Result{}, nil, err
-	}
-	managedPropsCM, _, err := d.ensurers.managedPropsConfigMap.Ensure(ctx, workflow, pl,
-		common.ManagedPropertiesMutateVisitor(ctx, d.StateSupport.Catalog, workflow, pl, userPropsCM.(*v1.ConfigMap)))
-	if err != nil {
-		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the managed properties config map")
-		_, err = d.PerformStatusUpdate(ctx, workflow)
-		return ctrl.Result{}, nil, err
+func (d *DeploymentReconciler) reconcileWithImage(ctx context.Context, workflow *operatorapi.SonataFlow, image string) (reconcile.Result, []client.Object, error) {
+	// Stop the reconciliation if the workflow requires Knative Serving and it is not installed in the cluster.
+	if requires, err := d.ensureKnativeServingRequired(workflow); requires || err != nil {
+		return reconcile.Result{Requeue: false}, nil, err
 	}
 
-	deployment, deploymentOp, err :=
-		d.ensurers.deployment.Ensure(
-			ctx,
-			workflow,
-			pl,
-			d.getDeploymentMutateVisitors(workflow, pl, image, userPropsCM.(*v1.ConfigMap), managedPropsCM.(*v1.ConfigMap))...,
-		)
-	if err != nil {
-		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Unable to perform the deploy due to ", err)
-		_, err = d.PerformStatusUpdate(ctx, workflow)
-		return reconcile.Result{}, nil, err
-	}
-
-	service, _, err := d.ensurers.service.Ensure(ctx, workflow, common.ServiceMutateVisitor(workflow))
-	if err != nil {
-		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Unable to make the service available due to ", err)
-		_, err = d.PerformStatusUpdate(ctx, workflow)
-		return reconcile.Result{}, nil, err
-	}
-
-	knativeObjs, err := common.NewKnativeEventingHandler(d.StateSupport).Ensure(ctx, workflow)
-	if err != nil {
-		return ctrl.Result{RequeueAfter: constants.RequeueAfterFailure}, nil, err
-	}
-	objs := []client.Object{deployment, service, managedPropsCM}
-	objs = append(objs, knativeObjs...)
-
-	if deploymentOp == controllerutil.OperationResultCreated {
-		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.WaitingForDeploymentReason, "")
-		if _, err := d.PerformStatusUpdate(ctx, workflow); err != nil {
-			return reconcile.Result{Requeue: false}, nil, err
-		}
-		return reconcile.Result{RequeueAfter: constants.RequeueAfterFollowDeployment, Requeue: true}, objs, nil
+	// Ensure objects
+	result, objs, err := d.ensureObjects(ctx, workflow, image)
+	if err != nil || result.Requeue {
+		return result, objs, err
 	}
 
 	// Follow deployment status
-	result, err := common.DeploymentManager(d.C).SyncDeploymentStatus(ctx, workflow)
+	result, err = common.DeploymentManager(d.C).SyncDeploymentStatus(ctx, workflow)
 	if err != nil {
 		return reconcile.Result{Requeue: false}, nil, err
 	}
@@ -110,15 +71,90 @@
 	return result, objs, nil
 }
 
-func (d *DeploymentReconciler) getDeploymentMutateVisitors(
+// ensureKnativeServingRequired returns true if the SonataFlow instance requires a Knative deployment but Knative Serving is not available in the cluster.
+func (d *DeploymentReconciler) ensureKnativeServingRequired(workflow *operatorapi.SonataFlow) (bool, error) {
+	if workflow.IsKnativeDeployment() {
+		avail, err := knative.GetKnativeAvailability(d.Cfg)
+		if err != nil {
+			return true, err
+		}
+		if !avail.Serving {
+			d.Recorder.Eventf(workflow, v1.EventTypeWarning,
+				"KnativeServingNotAvailable",
+				"Knative Serving is not available in this cluster, so the workflow can't be deployed. Please update the deployment model to %s",
+				operatorapi.KubernetesDeploymentModel)
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
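+// ensureObjects ensures the properties ConfigMaps, the workflow deployment object (a Deployment or a Knative Service,
+// depending on the deployment model), its Service and the Knative eventing objects, and returns the ensured objects.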
+func (d *DeploymentReconciler) ensureObjects(ctx context.Context, workflow *operatorapi.SonataFlow, image string) (reconcile.Result, []client.Object, error) {
+	pl, _ := platform.GetActivePlatform(ctx, d.C, workflow.Namespace)
+	userPropsCM, _, err := d.ensurers.userPropsConfigMap.Ensure(ctx, workflow)
+	if err != nil {
+		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the user properties config map")
+		_, _ = d.PerformStatusUpdate(ctx, workflow)
+		return reconcile.Result{}, nil, err
+	}
+	managedPropsCM, _, err := d.ensurers.managedPropsConfigMap.Ensure(ctx, workflow, pl,
+		common.ManagedPropertiesMutateVisitor(ctx, d.StateSupport.Catalog, workflow, pl, userPropsCM.(*v1.ConfigMap)))
+	if err != nil {
+		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the managed properties config map")
+		_, _ = d.PerformStatusUpdate(ctx, workflow)
+		return reconcile.Result{}, nil, err
+	}
+
+	deployment, deploymentOp, err :=
+		d.ensurers.DeploymentByDeploymentModel(workflow).Ensure(ctx, workflow, pl,
+			d.deploymentModelMutateVisitors(workflow, pl, image, userPropsCM.(*v1.ConfigMap), managedPropsCM.(*v1.ConfigMap))...)
+	if err != nil {
+		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Unable to perform the deploy due to ", err)
+		_, _ = d.PerformStatusUpdate(ctx, workflow)
+		return reconcile.Result{}, nil, err
+	}
+
+	service, _, err := d.ensurers.ServiceByDeploymentModel(workflow).Ensure(ctx, workflow, common.ServiceMutateVisitor(workflow))
+	if err != nil {
+		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Unable to make the service available due to ", err)
+		_, _ = d.PerformStatusUpdate(ctx, workflow)
+		return reconcile.Result{}, nil, err
+	}
+
+	eventingObjs, err := common.NewKnativeEventingHandler(d.StateSupport).Ensure(ctx, workflow)
+	if err != nil {
+		return reconcile.Result{}, nil, err
+	}
+
+	objs := []client.Object{deployment, managedPropsCM, service}
+	if deploymentOp == controllerutil.OperationResultCreated {
+		workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.WaitingForDeploymentReason, "")
+		if _, err := d.PerformStatusUpdate(ctx, workflow); err != nil {
+			return reconcile.Result{}, nil, err
+		}
+		return reconcile.Result{RequeueAfter: constants.RequeueAfterFollowDeployment, Requeue: true}, objs, nil
+	}
+	objs = append(objs, eventingObjs...)
+
+	return reconcile.Result{}, objs, nil
+}
+
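+// deploymentModelMutateVisitors returns the mutate visitors to apply to the workflow deployment object, selected by the
+// deployment model (Knative Service or Deployment) and, for Deployments, by the cluster type (OpenShift or plain Kubernetes).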
+func (d *DeploymentReconciler) deploymentModelMutateVisitors(
 	workflow *operatorapi.SonataFlow,
 	plf *operatorapi.SonataFlowPlatform,
 	image string,
 	userPropsCM *v1.ConfigMap,
 	managedPropsCM *v1.ConfigMap) []common.MutateVisitor {
+
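+	// Knative deployment model: enforce the default Knative Service state, set the flow container image and mount the properties config maps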
+	if workflow.IsKnativeDeployment() {
+		return []common.MutateVisitor{common.KServiceMutateVisitor(workflow, plf),
+			common.ImageKServiceMutateVisitor(workflow, image),
+			mountConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM)}
+	}
+
 	if utils.IsOpenShift() {
 		return []common.MutateVisitor{common.DeploymentMutateVisitor(workflow, plf),
-			mountProdConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM),
+			mountConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM),
 			addOpenShiftImageTriggerDeploymentMutateVisitor(workflow, image),
 			common.ImageDeploymentMutateVisitor(workflow, image),
 			common.RolloutDeploymentIfCMChangedMutateVisitor(workflow, userPropsCM, managedPropsCM),
@@ -126,6 +162,6 @@
 	}
 	return []common.MutateVisitor{common.DeploymentMutateVisitor(workflow, plf),
 		common.ImageDeploymentMutateVisitor(workflow, image),
-		mountProdConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM),
+		mountConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM),
 		common.RolloutDeploymentIfCMChangedMutateVisitor(workflow, userPropsCM, managedPropsCM)}
 }
diff --git a/controllers/profiles/preview/deployment_handler_test.go b/controllers/profiles/preview/deployment_handler_test.go
index d70436d..5faf98b 100644
--- a/controllers/profiles/preview/deployment_handler_test.go
+++ b/controllers/profiles/preview/deployment_handler_test.go
@@ -28,10 +28,42 @@
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 )
 
+type fakeDeploymentReconciler struct {
+	DeploymentReconciler
+}
+
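+// Test_CheckDeploymentModelIsKnative verifies that a preview-profile workflow with the "knative" deployment model
+// produces a Knative Service whose workflow container is the operator's default container.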
+func Test_CheckDeploymentModelIsKnative(t *testing.T) {
+	workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name())
+	workflow.Spec.PodTemplate.DeploymentModel = v1alpha08.KnativeDeploymentModel
+
+	cli := test.NewSonataFlowClientBuilderWithKnative().
+		WithRuntimeObjects(workflow).
+		WithStatusSubresource(workflow).
+		Build()
+	stateSupport := fakeReconcilerSupport(cli)
+	handler := NewDeploymentReconciler(stateSupport, NewObjectEnsurers(stateSupport))
+
+	result, objects, err := handler.ensureObjects(context.TODO(), workflow, "")
+	assert.NoError(t, err)
+	assert.NotEmpty(t, objects)
+	assert.True(t, result.Requeue)
+
+	var ksvc *servingv1.Service
+	for _, o := range objects {
+		if _, ok := o.(*servingv1.Service); ok {
+			ksvc = o.(*servingv1.Service)
+			assert.Equal(t, v1alpha08.DefaultContainerName, ksvc.Spec.Template.Spec.Containers[0].Name)
+			break
+		}
+	}
+	assert.NotNil(t, ksvc)
+}
+
 func Test_CheckPodTemplateChangesReflectDeployment(t *testing.T) {
-	workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name())
+	workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name())
 
 	client := test.NewSonataFlowClientBuilder().
 		WithRuntimeObjects(workflow).
@@ -53,18 +85,20 @@
 	assert.NoError(t, err)
 	assert.NotEmpty(t, objects)
 	assert.True(t, result.Requeue)
+	var deployment *v1.Deployment
 	for _, o := range objects {
 		if _, ok := o.(*v1.Deployment); ok {
-			deployment := o.(*v1.Deployment)
+			deployment = o.(*v1.Deployment)
 			assert.Equal(t, expectedImg, deployment.Spec.Template.Spec.Containers[0].Image)
 			assert.Equal(t, v1alpha08.DefaultContainerName, deployment.Spec.Template.Spec.Containers[0].Name)
 			break
 		}
 	}
+	assert.NotNil(t, deployment)
 }
 
 func Test_CheckDeploymentRolloutAfterCMChange(t *testing.T) {
-	workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name())
+	workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name())
 
 	client := test.NewSonataFlowClientBuilder().
 		WithRuntimeObjects(workflow).
@@ -126,7 +160,7 @@
 }
 
 func Test_CheckDeploymentUnchangedAfterCMChangeOtherKeys(t *testing.T) {
-	workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name())
+	workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name())
 
 	client := test.NewSonataFlowClientBuilder().
 		WithRuntimeObjects(workflow).
diff --git a/controllers/profiles/preview/object_creators_preview.go b/controllers/profiles/preview/object_creators_preview.go
index 903cd74..a48a4e5 100644
--- a/controllers/profiles/preview/object_creators_preview.go
+++ b/controllers/profiles/preview/object_creators_preview.go
@@ -24,6 +24,7 @@
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
@@ -64,28 +65,40 @@
 	}
 }
 
-// mountDevConfigMapsMutateVisitor mounts the required configMaps in the Workflow Dev Deployment
-func mountProdConfigMapsMutateVisitor(workflow *operatorapi.SonataFlow, userPropsCM *v1.ConfigMap, managedPropsCM *v1.ConfigMap) common.MutateVisitor {
+// mountConfigMapsMutateVisitor mounts the required configMaps into the workflow's pod spec (Deployment or Knative Service)
+func mountConfigMapsMutateVisitor(workflow *operatorapi.SonataFlow, userPropsCM *v1.ConfigMap, managedPropsCM *v1.ConfigMap) common.MutateVisitor {
 	return func(object client.Object) controllerutil.MutateFn {
 		return func() error {
-			deployment := object.(*appsv1.Deployment)
-			_, idx := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, &deployment.Spec.Template.Spec)
+			var podTemplateSpec *v1.PodSpec
 
-			if len(deployment.Spec.Template.Spec.Volumes) == 0 {
-				deployment.Spec.Template.Spec.Volumes = make([]v1.Volume, 0, 1)
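+			// pick the pod spec to mutate: the Knative revision template or the Deployment pod template (the config checksum annotation only applies to Deployments)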
+			if workflow.IsKnativeDeployment() {
+				ksvc := object.(*servingv1.Service)
+				podTemplateSpec = &ksvc.Spec.Template.Spec.PodSpec
+			} else {
+				deployment := object.(*appsv1.Deployment)
+				podTemplateSpec = &deployment.Spec.Template.Spec
+				if err := kubeutil.AnnotateDeploymentConfigChecksum(workflow, deployment, userPropsCM, managedPropsCM); err != nil {
+					return err
+				}
 			}
-			if len(deployment.Spec.Template.Spec.Containers[idx].VolumeMounts) == 0 {
-				deployment.Spec.Template.Spec.Containers[idx].VolumeMounts = make([]v1.VolumeMount, 0, 1)
+
+			_, idx := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, podTemplateSpec)
+
+			if len(podTemplateSpec.Volumes) == 0 {
+				podTemplateSpec.Volumes = make([]v1.Volume, 0, 1)
+			}
+			if len(podTemplateSpec.Containers[idx].VolumeMounts) == 0 {
+				podTemplateSpec.Containers[idx].VolumeMounts = make([]v1.VolumeMount, 0, 1)
 			}
 
 			defaultResourcesVolume := v1.Volume{Name: constants.ConfigMapWorkflowPropsVolumeName, VolumeSource: v1.VolumeSource{Projected: &v1.ProjectedVolumeSource{}}}
 			kubeutil.VolumeProjectionAddConfigMap(defaultResourcesVolume.Projected, userPropsCM.Name, v1.KeyToPath{Key: workflowproj.ApplicationPropertiesFileName, Path: workflowproj.ApplicationPropertiesFileName})
 			kubeutil.VolumeProjectionAddConfigMap(defaultResourcesVolume.Projected, managedPropsCM.Name, v1.KeyToPath{Key: workflowproj.GetManagedPropertiesFileName(workflow), Path: workflowproj.GetManagedPropertiesFileName(workflow)})
-			kubeutil.AddOrReplaceVolume(&deployment.Spec.Template.Spec, defaultResourcesVolume)
-			kubeutil.AddOrReplaceVolumeMount(idx, &deployment.Spec.Template.Spec,
+			kubeutil.AddOrReplaceVolume(podTemplateSpec, defaultResourcesVolume)
+			kubeutil.AddOrReplaceVolumeMount(idx, podTemplateSpec,
 				kubeutil.VolumeMount(constants.ConfigMapWorkflowPropsVolumeName, true, quarkusProdConfigMountPath))
 
-			return kubeutil.AnnotateDeploymentConfigChecksum(workflow, deployment, userPropsCM, managedPropsCM)
+			return nil
 		}
 	}
 }
diff --git a/controllers/profiles/preview/profile_preview.go b/controllers/profiles/preview/profile_preview.go
index a4afea9..1a0dda4 100644
--- a/controllers/profiles/preview/profile_preview.go
+++ b/controllers/profiles/preview/profile_preview.go
@@ -23,6 +23,7 @@
 	"time"
 
 	"github.com/apache/incubator-kie-kogito-serverless-operator/api/metadata"
+	"github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08"
 	"k8s.io/client-go/rest"
 
 	"github.com/apache/incubator-kie-kogito-serverless-operator/controllers/discovery"
@@ -48,20 +49,42 @@
 	quarkusProdConfigMountPath = "/deployments/config"
 )
 
-// ObjectEnsurers is a struct for the objects that ReconciliationState needs to create in the platform for the Production profile.
+// ObjectEnsurers is a struct for the objects that ReconciliationState needs to create in the platform for the preview profile.
 // ReconciliationState that needs access to it must include this struct as an attribute and initialize it in the profile builder.
 // Use NewObjectEnsurers to facilitate building this struct
 type ObjectEnsurers struct {
-	deployment            common.ObjectEnsurerWithPlatform
+	// deployment is the Deployment ensurer. Don't access it directly; use DeploymentByDeploymentModel instead
+	deployment common.ObjectEnsurerWithPlatform
+	// kservice is the Knative Serving (Knative Service) ensurer. Don't access it directly; use DeploymentByDeploymentModel instead
+	kservice common.ObjectEnsurerWithPlatform
+	// service is the Kubernetes Service ensurer. Don't access it directly; use ServiceByDeploymentModel instead
 	service               common.ObjectEnsurer
 	userPropsConfigMap    common.ObjectEnsurer
 	managedPropsConfigMap common.ObjectEnsurerWithPlatform
 }
 
+// DeploymentByDeploymentModel gets the deployment ensurer based on the SonataFlow deployment model
+func (o *ObjectEnsurers) DeploymentByDeploymentModel(workflow *v1alpha08.SonataFlow) common.ObjectEnsurerWithPlatform {
+	if workflow.IsKnativeDeployment() {
+		return o.kservice
+	}
+	return o.deployment
+}
+
+// ServiceByDeploymentModel gets the service ensurer based on the SonataFlow deployment model
+func (o *ObjectEnsurers) ServiceByDeploymentModel(workflow *v1alpha08.SonataFlow) common.ObjectEnsurer {
+	if workflow.IsKnativeDeployment() {
+		// Knative Serving handles the service
+		return common.NewNoopObjectEnsurer()
+	}
+	return o.service
+}
+
 // NewObjectEnsurers common.ObjectEnsurer(s) for the preview profile.
 func NewObjectEnsurers(support *common.StateSupport) *ObjectEnsurers {
 	return &ObjectEnsurers{
 		deployment:            common.NewObjectEnsurerWithPlatform(support.C, common.DeploymentCreator),
+		kservice:              common.NewObjectEnsurerWithPlatform(support.C, common.KServiceCreator),
 		service:               common.NewObjectEnsurer(support.C, common.ServiceCreator),
 		userPropsConfigMap:    common.NewObjectEnsurer(support.C, common.UserPropsConfigMapCreator),
 		managedPropsConfigMap: common.NewObjectEnsurerWithPlatform(support.C, common.ManagedPropsConfigMapCreator),
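
A hedged sketch of how a reconciliation state is expected to consume these selectors instead of reaching into the struct fields directly (the function name is illustrative, imports as in profile_preview.go):

// pickEnsurers shows the intended call pattern: the workload ensurer is either the
// Deployment or the Knative Service ensurer, and the Service ensurer degrades to a
// no-op when Knative Serving creates and manages the Service itself.
func pickEnsurers(e *ObjectEnsurers, workflow *v1alpha08.SonataFlow) (common.ObjectEnsurerWithPlatform, common.ObjectEnsurer) {
	return e.DeploymentByDeploymentModel(workflow), e.ServiceByDeploymentModel(workflow)
}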
diff --git a/controllers/profiles/preview/profile_preview_test.go b/controllers/profiles/preview/profile_preview_test.go
index cc518db..22146cc 100644
--- a/controllers/profiles/preview/profile_preview_test.go
+++ b/controllers/profiles/preview/profile_preview_test.go
@@ -198,5 +198,6 @@
 	return &common.StateSupport{
 		C:        client,
 		Recorder: test.NewFakeRecorder(),
+		Cfg:      &rest.Config{},
 	}
 }
diff --git a/controllers/profiles/preview/states_preview.go b/controllers/profiles/preview/states_preview.go
index 933820e..4612c84 100644
--- a/controllers/profiles/preview/states_preview.go
+++ b/controllers/profiles/preview/states_preview.go
@@ -189,7 +189,7 @@
 	}
 
 	// didn't change, business as usual
-	return NewDeploymentReconciler(h.StateSupport, h.ensurers).reconcileWithBuiltImage(ctx, workflow, build.Status.ImageTag)
+	return NewDeploymentReconciler(h.StateSupport, h.ensurers).reconcileWithImage(ctx, workflow, build.Status.ImageTag)
 }
 
 func (h *deployWithBuildWorkflowState) PostReconcile(ctx context.Context, workflow *operatorapi.SonataFlow) error {
diff --git a/controllers/sonataflow_controller.go b/controllers/sonataflow_controller.go
index 447e386..7c1d28a 100644
--- a/controllers/sonataflow_controller.go
+++ b/controllers/sonataflow_controller.go
@@ -23,6 +23,7 @@
 	"context"
 	"fmt"
 
+	"github.com/apache/incubator-kie-kogito-serverless-operator/api/metadata"
 	"k8s.io/klog/v2"
 
 	profiles "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/factory"
@@ -89,6 +90,8 @@
 		return ctrl.Result{}, err
 	}
 
+	r.setDefaults(workflow)
+
 	// Only process resources assigned to the operator
 	if !platform.IsOperatorHandlerConsideringLock(ctx, r.Client, req.Namespace, workflow) {
 		klog.V(log.I).InfoS("Ignoring request because resource is not assigned to current operator")
@@ -97,6 +100,18 @@
 	return profiles.NewReconciler(r.Client, r.Config, r.Recorder, workflow).Reconcile(ctx, workflow)
 }
 
+// TODO: move to webhook see https://github.com/apache/incubator-kie-kogito-serverless-operator/pull/239
+func (r *SonataFlowReconciler) setDefaults(workflow *operatorapi.SonataFlow) {
+	if workflow.Annotations == nil {
+		workflow.Annotations = map[string]string{}
+	}
+	profile := metadata.GetProfileOrDefault(workflow.Annotations)
+	workflow.Annotations[metadata.Profile] = string(profile)
+	if profile == metadata.DevProfile {
+		workflow.Spec.PodTemplate.DeploymentModel = operatorapi.KubernetesDeploymentModel
+	}
+}
+
 func platformEnqueueRequestsFromMapFunc(c client.Client, p *operatorapi.SonataFlowPlatform) []reconcile.Request {
 	var requests []reconcile.Request
 
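setDefaults pins dev-profile workflows to the Kubernetes deployment model, so the Knative path is only taken when the user asks for it explicitly. The branching in the preview profile relies on a small helper on the SonataFlow type; a plausible shape for it, assuming the real one lives in api/v1alpha08 next to the DeploymentModel field and that the "knative" enum value is exposed as a KnativeDeploymentModel constant:

// IsKnativeDeployment (sketch) reports whether the workflow asked for the Knative model.
// Because setDefaults above forces KubernetesDeploymentModel for the dev profile, only an
// explicit "knative" value ever reaches the Knative Service ensurer.
func (f *SonataFlow) IsKnativeDeployment() bool {
	return f.Spec.PodTemplate.DeploymentModel == KnativeDeploymentModel
}
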
diff --git a/hack/local/run-operator.sh b/hack/local/run-operator.sh
index 5cb8a73..8b57477 100755
--- a/hack/local/run-operator.sh
+++ b/hack/local/run-operator.sh
@@ -17,6 +17,9 @@
 # under the License.
 
 # Runs the operator locally via go main
+POD_NAMESPACE=$(kubectl config view --minify | grep namespace | cut -d" " -f6)
+
+export POD_NAMESPACE
 
 kubectl delete --ignore-not-found=true -f ./bundle/manifests/sonataflow.org_sonataflowclusterplatforms.yaml
 kubectl delete --ignore-not-found=true -f ./bundle/manifests/sonataflow.org_sonataflowplatforms.yaml
diff --git a/main.go b/main.go
index 535a3ed..e0265ed 100644
--- a/main.go
+++ b/main.go
@@ -26,6 +26,7 @@
 	"github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg"
 	eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
 	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 
 	"k8s.io/klog/v2/klogr"
 
@@ -60,6 +61,7 @@
 	utilruntime.Must(operatorapi.AddToScheme(scheme))
 	utilruntime.Must(sourcesv1.AddToScheme(scheme))
 	utilruntime.Must(eventingv1.AddToScheme(scheme))
+	utilruntime.Must(servingv1.AddToScheme(scheme))
 	//+kubebuilder:scaffold:scheme
 }
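
Adding servingv1 to the scheme is what lets the manager's client and caches handle Knative Services. For changes to the generated Knative Service to trigger workflow reconciliations, the controller would also need to own that type; a hedged sketch of such wiring (whether this PR wires it exactly this way is an assumption, and the method name here is illustrative):

// setupWithManagerSketch shows how the reconciler could watch the objects it creates,
// including the Knative Service, once servingv1 is registered in the scheme.
func (r *SonataFlowReconciler) setupWithManagerSketch(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&operatorapi.SonataFlow{}).
		Owns(&appsv1.Deployment{}).
		Owns(&servingv1.Service{}).
		Complete(r)
}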
 
diff --git a/operator.yaml b/operator.yaml
index 14dfec1..f2ef717 100644
--- a/operator.yaml
+++ b/operator.yaml
@@ -22523,6 +22523,13 @@
                       - name
                       type: object
                     type: array
+                  deploymentModel:
+                    description: Defines the kind of deployment model for this pod
+                      spec. In dev profile, only "kubernetes" is valid.
+                    enum:
+                    - kubernetes
+                    - knative
+                    type: string
                   dnsConfig:
                     description: Specifies the DNS parameters of a pod. Parameters
                       specified here will be merged to the generated DNS configuration
@@ -24050,6 +24057,8 @@
                       type: object
                     type: array
                   replicas:
+                    description: Replicas defines the number of pods to start by default
+                      for this deployment. Ignored by the "knative" deployment model.
                     format: int32
                     type: integer
                   resourceClaims:
@@ -26492,6 +26501,12 @@
   - patch
   - update
   - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: sonataflow-operator-knative-manager-role
+rules:
 - apiGroups:
   - eventing.knative.dev
   resources:
@@ -26522,6 +26537,22 @@
   - patch
   - update
   - watch
+- apiGroups:
+  - serving.knative.dev
+  resources:
+  - services
+  - services/status
+  - services/finalizers
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
@@ -26865,6 +26896,19 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
+  name: sonataflow-operator-knative-manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: sonataflow-operator-knative-manager-role
+subjects:
+- kind: ServiceAccount
+  name: sonataflow-operator-controller-manager
+  namespace: sonataflow-operator-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
   name: sonataflow-operator-leases-binding
 roleRef:
   apiGroup: rbac.authorization.k8s.io
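
The serving.knative.dev ClusterRole and its binding give the operator the RBAC it needs to create and watch Knative Services. These manifests are normally generated from kubebuilder markers on the controller code; a marker along these lines (illustrative, not quoted verbatim from this PR) would expand into the rules above:

// Illustrative kubebuilder RBAC marker; controller-gen expands it into ClusterRole rules
// equivalent to the serving.knative.dev block in operator.yaml above.
//+kubebuilder:rbac:groups=serving.knative.dev,resources=services;services/status;services/finalizers,verbs=get;list;watch;create;delete;deletecollection;patch;update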
diff --git a/test/kubernetes_cli.go b/test/kubernetes_cli.go
index c4d7021..72029fd 100644
--- a/test/kubernetes_cli.go
+++ b/test/kubernetes_cli.go
@@ -33,6 +33,7 @@
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/record"
+	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 	ctrl "sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
@@ -50,6 +51,13 @@
 	return fake.NewClientBuilder().WithScheme(s)
 }
 
+func NewSonataFlowClientBuilderWithKnative() *fake.ClientBuilder {
+	s := scheme.Scheme
+	utilruntime.Must(operatorapi.AddToScheme(s))
+	utilruntime.Must(servingv1.AddToScheme(s))
+	return fake.NewClientBuilder().WithScheme(s)
+}
+
 // NewKogitoClientBuilderWithOpenShift creates a new fake client with OpenShift schemas.
 // If your object is not present, just add in the list below.
 func NewKogitoClientBuilderWithOpenShift() *fake.ClientBuilder {
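
A hedged example of how a test might use the new builder; the fixture and exact builder chain are illustrative and assume the code sits in this test package with a plain "testing" import:

func TestKnativeFakeClientSketch(t *testing.T) {
	// The builder registers servingv1, so the fake client can create and fetch the
	// Knative Service the preview profile generates for a "knative" workflow.
	workflow := GetBaseSonataFlowWithPreviewProfile("default")
	cli := NewSonataFlowClientBuilderWithKnative().WithObjects(workflow).Build()
	_ = cli
}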
diff --git a/test/yaml.go b/test/yaml.go
index 84ac1b7..fd96943 100644
--- a/test/yaml.go
+++ b/test/yaml.go
@@ -215,8 +215,8 @@
 	return NewSonataFlow(sonataFlowSampleYamlCR, namespace, SetPreviewProfile)
 }
 
-// GetBaseSonataFlowWithProdOpsProfile gets a base workflow that has a pre-built image set in podTemplate.
-func GetBaseSonataFlowWithProdOpsProfile(namespace string) *operatorapi.SonataFlow {
+// GetBaseSonataFlowWithPreviewProfile gets a base workflow that has a pre-built image set in podTemplate.
+func GetBaseSonataFlowWithPreviewProfile(namespace string) *operatorapi.SonataFlow {
 	return NewSonataFlow(SonataFlowSimpleOpsYamlCR, namespace)
 }
 
diff --git a/testbdd/go.mod b/testbdd/go.mod
index 5f0c0ed..bea1546 100644
--- a/testbdd/go.mod
+++ b/testbdd/go.mod
@@ -66,6 +66,7 @@
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/go-containerregistry v0.13.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
 	github.com/google/uuid v1.3.1 // indirect
@@ -89,6 +90,7 @@
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/onsi/ginkgo/v2 v2.13.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -137,7 +139,9 @@
 	k8s.io/klog/v2 v2.100.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect
 	k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
+	knative.dev/networking v0.0.0-20231017124814-2a7676e912b7 // indirect
 	knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c // indirect
+	knative.dev/serving v0.39.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
diff --git a/testbdd/go.sum b/testbdd/go.sum
index 2e0cf29..16617ae 100644
--- a/testbdd/go.sum
+++ b/testbdd/go.sum
@@ -460,6 +460,7 @@
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -718,6 +719,7 @@
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1566,10 +1568,12 @@
 knative.dev/eventing v0.26.0/go.mod h1:6tTam0lsPtBSJHJ63/195obj2VAHlTZZB7TLiBSeqk0=
 knative.dev/hack v0.0.0-20210806075220-815cd312d65c/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
 knative.dev/hack/schema v0.0.0-20210806075220-815cd312d65c/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
+knative.dev/networking v0.0.0-20231017124814-2a7676e912b7 h1:6+1icZuxiZO1paFZ4d/ysKWVG2M4WB7OxNJNyLG0P/E=
 knative.dev/pkg v0.0.0-20210914164111-4857ab6939e3/go.mod h1:jMSqkNMsrzuy+XR4Yr/BMy7SDVbUOl3KKB6+5MR+ZU8=
 knative.dev/pkg v0.0.0-20210919202233-5ae482141474/go.mod h1:jMSqkNMsrzuy+XR4Yr/BMy7SDVbUOl3KKB6+5MR+ZU8=
 knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c h1:xyPoEToTWeBdn6tinhLxXfnhJhTNQt5WzHiTNiFphRw=
 knative.dev/reconciler-test v0.0.0-20210915181908-49fac7555086/go.mod h1:6yDmb26SINSmgw6wVy9qQwgRMewiW8ddkkwGLR0ZvOY=
+knative.dev/serving v0.39.0 h1:NVt8WthHmFFMWZ3qpBblXt47del8qqrbCegqwGBVSwk=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
diff --git a/utils/common.go b/utils/common.go
index e24e869..073bb25 100644
--- a/utils/common.go
+++ b/utils/common.go
@@ -28,7 +28,11 @@
 )
 
 const (
-	HttpScheme = "http"
+	// DefaultServicePortName is the default service port name, chosen for compatibility with Knative.
+	//
+	// See: https://github.com/knative/specs/blob/main/specs/serving/runtime-contract.md#protocols-and-ports
+	// Quarkus supports HTTP/2 by default: https://quarkus.io/guides/http-reference#http2-support
+	DefaultServicePortName = "h2c"
 )
 
 // GetOperatorIDAnnotation to safely get the operator id annotation value.
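
Naming the port "h2c" is how Knative's runtime contract learns the container serves cleartext HTTP/2; a plain Kubernetes Service treats it as an ordinary port name. A small illustration of where the constant would be used (the port numbers are assumptions, e.g. the Quarkus default 8080, not taken from this diff; corev1 and intstr imports assumed):

// defaultServicePortSketch builds a ServicePort using DefaultServicePortName ("h2c").
func defaultServicePortSketch() corev1.ServicePort {
	return corev1.ServicePort{
		Name:       DefaultServicePortName, // "h2c": HTTP/2 over cleartext
		Protocol:   corev1.ProtocolTCP,
		Port:       80,
		TargetPort: intstr.FromInt(8080), // Quarkus default HTTP port (assumption)
	}
}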
diff --git a/workflowproj/workflowproj.go b/workflowproj/workflowproj.go
index bd8653d..2bf1deb 100644
--- a/workflowproj/workflowproj.go
+++ b/workflowproj/workflowproj.go
@@ -304,9 +304,5 @@
 
 // IsDevProfile detects if the workflow is using the Dev profile or not
 func IsDevProfile(workflow *operatorapi.SonataFlow) bool {
-	profile := workflow.Annotations[metadata.Profile]
-	if len(profile) == 0 {
-		return false
-	}
-	return metadata.ProfileType(profile) == metadata.DevProfile
+	return metadata.IsDevProfile(workflow.Annotations)
 }