Use DeferCleanup() and a sharedZK in e2e tests (#562)

Overall, this just makes the e2e tests cleaner and faster.
diff --git a/api/v1beta1/solrcloud_types.go b/api/v1beta1/solrcloud_types.go
index f78d3a0..8c9145c 100644
--- a/api/v1beta1/solrcloud_types.go
+++ b/api/v1beta1/solrcloud_types.go
@@ -1211,6 +1211,10 @@
 	return podNames
 }
 
+func (sc *SolrCloud) GetSolrPodName(podNumber int) string {
+	return fmt.Sprintf("%s-%d", sc.StatefulSetName(), podNumber)
+}
+
 func (sc *SolrCloud) BasicAuthSecretName() string {
 	if sc.Spec.SolrSecurity != nil && sc.Spec.SolrSecurity.BasicAuthSecret != "" {
 		return sc.Spec.SolrSecurity.BasicAuthSecret
diff --git a/tests/e2e/backups_test.go b/tests/e2e/backups_test.go
index 3f0a95b..3188bf7 100644
--- a/tests/e2e/backups_test.go
+++ b/tests/e2e/backups_test.go
@@ -25,10 +25,8 @@
 	. "github.com/onsi/gomega"
 	. "github.com/onsi/gomega/gstruct"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
-	"strings"
 	"time"
 )
 
@@ -47,44 +45,14 @@
 		Create a single SolrCloud that all PrometheusExporter tests in this "Describe" will use.
 	*/
 	BeforeAll(func(ctx context.Context) {
-		solrCloud = &solrv1beta1.SolrCloud{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "foo",
-				Namespace: testNamespace(),
-			},
-			Spec: solrv1beta1.SolrCloudSpec{
-				Replicas: &two,
-				SolrImage: &solrv1beta1.ContainerImage{
-					Repository: strings.Split(solrImage, ":")[0],
-					Tag:        strings.Split(solrImage+":", ":")[1],
-					PullPolicy: corev1.PullIfNotPresent,
-				},
-				ZookeeperRef: &solrv1beta1.ZookeeperRef{
-					ProvidedZookeeper: &solrv1beta1.ZookeeperSpec{
-						Replicas:  &one,
-						Ephemeral: &solrv1beta1.ZKEphemeral{},
-					},
-				},
-				SolrJavaMem: "-Xms512m -Xmx512m",
-				CustomSolrKubeOptions: solrv1beta1.CustomSolrKubeOptions{
-					PodOptions: &solrv1beta1.PodOptions{
-						Resources: corev1.ResourceRequirements{
-							Requests: corev1.ResourceList{
-								corev1.ResourceMemory: resource.MustParse("600Mi"),
-								corev1.ResourceCPU:    resource.MustParse("1"),
-							},
-						},
-					},
-				},
-				BackupRepositories: []solrv1beta1.SolrBackupRepository{
-					{
-						Name: localBackupRepository,
-						Volume: &solrv1beta1.VolumeRepository{
-							Source: corev1.VolumeSource{
-								HostPath: &corev1.HostPathVolumeSource{
-									Path: backupDirHostPath,
-								},
-							},
+		solrCloud = generateBaseSolrCloud(2)
+		solrCloud.Spec.BackupRepositories = []solrv1beta1.SolrBackupRepository{
+			{
+				Name: localBackupRepository,
+				Volume: &solrv1beta1.VolumeRepository{
+					Source: corev1.VolumeSource{
+						HostPath: &corev1.HostPathVolumeSource{
+							Path: backupDirHostPath,
 						},
 					},
 				},
@@ -94,6 +62,10 @@
 		By("creating the SolrCloud")
 		Expect(k8sClient.Create(ctx, solrCloud)).To(Succeed())
 
+		DeferCleanup(func(ctx context.Context) {
+			cleanupTest(ctx, solrCloud)
+		})
+
 		By("Waiting for the SolrCloud to come up healthy")
 		solrCloud = expectSolrCloudWithChecks(ctx, solrCloud, func(g Gomega, found *solrv1beta1.SolrCloud) {
 			g.Expect(found.Status.ReadyReplicas).To(Equal(*found.Spec.Replicas), "The SolrCloud should have all nodes come up healthy")
@@ -127,14 +99,10 @@
 
 		By("creating a SolrBackup")
 		Expect(k8sClient.Create(ctx, solrBackup)).To(Succeed())
-	})
 
-	AfterAll(func(ctx context.Context) {
-		cleanupTest(ctx, solrCloud)
-	})
-
-	AfterEach(func(ctx context.Context) {
-		deleteAndWait(ctx, solrBackup)
+		DeferCleanup(func(ctx context.Context) {
+			deleteAndWait(ctx, solrBackup)
+		})
 	})
 
 	FContext("Local Directory - Recurring", func() {
diff --git a/tests/e2e/prometheus_exporter_test.go b/tests/e2e/prometheus_exporter_test.go
index 67b1932..e5d840c 100644
--- a/tests/e2e/prometheus_exporter_test.go
+++ b/tests/e2e/prometheus_exporter_test.go
@@ -22,10 +22,7 @@
 	solrv1beta1 "github.com/apache/solr-operator/api/v1beta1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"strings"
 )
 
 /*
@@ -46,41 +43,15 @@
 		Create a single SolrCloud that all PrometheusExporter tests in this "Describe" will use.
 	*/
 	BeforeAll(func(ctx context.Context) {
-		solrCloud = &solrv1beta1.SolrCloud{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "foo",
-				Namespace: testNamespace(),
-			},
-			Spec: solrv1beta1.SolrCloudSpec{
-				Replicas: &two,
-				SolrImage: &solrv1beta1.ContainerImage{
-					Repository: strings.Split(solrImage, ":")[0],
-					Tag:        strings.Split(solrImage+":", ":")[1],
-					PullPolicy: corev1.PullIfNotPresent,
-				},
-				ZookeeperRef: &solrv1beta1.ZookeeperRef{
-					ProvidedZookeeper: &solrv1beta1.ZookeeperSpec{
-						Replicas:  &one,
-						Ephemeral: &solrv1beta1.ZKEphemeral{},
-					},
-				},
-				SolrJavaMem: "-Xms512m -Xmx512m",
-				CustomSolrKubeOptions: solrv1beta1.CustomSolrKubeOptions{
-					PodOptions: &solrv1beta1.PodOptions{
-						Resources: corev1.ResourceRequirements{
-							Requests: corev1.ResourceList{
-								corev1.ResourceMemory: resource.MustParse("600Mi"),
-								corev1.ResourceCPU:    resource.MustParse("1"),
-							},
-						},
-					},
-				},
-			},
-		}
+		solrCloud = generateBaseSolrCloud(2)
 
 		By("creating the SolrCloud")
 		Expect(k8sClient.Create(ctx, solrCloud)).To(Succeed())
 
+		DeferCleanup(func(ctx context.Context) {
+			cleanupTest(ctx, solrCloud)
+		})
+
 		By("waiting for the SolrCloud to come up healthy")
 		solrCloud = expectSolrCloudWithChecks(ctx, solrCloud, func(g Gomega, found *solrv1beta1.SolrCloud) {
 			g.Expect(found.Status.ReadyReplicas).To(Equal(*found.Spec.Replicas), "The SolrCloud should have all nodes come up healthy")
@@ -104,6 +75,10 @@
 		By("creating a SolrPrometheusExporter")
 		Expect(k8sClient.Create(ctx, solrPrometheusExporter)).To(Succeed())
 
+		DeferCleanup(func(ctx context.Context) {
+			deleteAndWait(ctx, solrPrometheusExporter)
+		})
+
 		By("waiting for the SolrPrometheusExporter to come up healthy")
 		solrPrometheusExporter = expectSolrPrometheusExporterWithChecks(ctx, solrPrometheusExporter, func(g Gomega, found *solrv1beta1.SolrPrometheusExporter) {
 			g.Expect(found.Status.Ready).To(BeTrue(), "The SolrPrometheusExporter should come up healthy")
@@ -113,14 +88,6 @@
 		checkMetrics(ctx, solrPrometheusExporter, solrCloud, solrCollection)
 	})
 
-	AfterEach(func(ctx context.Context) {
-		deleteAndWait(ctx, solrPrometheusExporter)
-	})
-
-	AfterAll(func(ctx context.Context) {
-		cleanupTest(ctx, solrCloud)
-	})
-
 	FContext("Default - Solr Reference", func() {
 		BeforeEach(func() {
 			solrPrometheusExporter.Spec.SolrReference = solrv1beta1.SolrReference{
diff --git a/tests/e2e/resource_utils_test.go b/tests/e2e/resource_utils_test.go
index 87d3ada..71aa698 100644
--- a/tests/e2e/resource_utils_test.go
+++ b/tests/e2e/resource_utils_test.go
@@ -177,6 +177,35 @@
 	return foundSolrBackup
 }
 
+func expectZookeeperCluster(ctx context.Context, parentResource client.Object, zkName string, additionalOffset ...int) *zkApi.ZookeeperCluster {
+	return expectZookeeperClusterWithChecks(ctx, parentResource, zkName, nil, resolveOffset(additionalOffset))
+}
+
+func expectZookeeperClusterWithChecks(ctx context.Context, parentResource client.Object, zkName string, additionalChecks func(Gomega, *zkApi.ZookeeperCluster), additionalOffset ...int) *zkApi.ZookeeperCluster {
+	found := &zkApi.ZookeeperCluster{}
+	EventuallyWithOffset(resolveOffset(additionalOffset), func(g Gomega) {
+		g.Expect(k8sClient.Get(ctx, resourceKey(parentResource, zkName), found)).To(Succeed(), "Expected ZookeeperCluster does not exist")
+		if additionalChecks != nil {
+			additionalChecks(g, found)
+		}
+	}).Should(Succeed())
+
+	return found
+}
+
+func expectZookeeperClusterWithConsistentChecks(ctx context.Context, parentResource client.Object, zkName string, additionalChecks func(Gomega, *zkApi.ZookeeperCluster), additionalOffset ...int) *zkApi.ZookeeperCluster {
+	found := &zkApi.ZookeeperCluster{}
+	ConsistentlyWithOffset(resolveOffset(additionalOffset), func(g Gomega) {
+		g.Expect(k8sClient.Get(ctx, resourceKey(parentResource, zkName), found)).To(Succeed(), "Expected ZookeeperCluster does not exist")
+
+		if additionalChecks != nil {
+			additionalChecks(g, found)
+		}
+	}).Should(Succeed())
+
+	return found
+}
+
 func expectSecret(ctx context.Context, parentResource client.Object, secretName string, additionalOffset ...int) *corev1.Secret {
 	return expectSecretWithChecks(ctx, parentResource, secretName, nil, resolveOffset(additionalOffset))
 }
diff --git a/tests/e2e/solrcloud_basic_test.go b/tests/e2e/solrcloud_basic_test.go
new file mode 100644
index 0000000..673377d
--- /dev/null
+++ b/tests/e2e/solrcloud_basic_test.go
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package e2e
+
+import (
+	"context"
+	solrv1beta1 "github.com/apache/solr-operator/api/v1beta1"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/utils/pointer"
+)
+
+var _ = FDescribe("E2E - SolrCloud - Basic", func() {
+	var (
+		solrCloud *solrv1beta1.SolrCloud
+
+		solrCollection1 = "e2e-1"
+
+		solrCollection2 = "e2e-2"
+	)
+
+	BeforeEach(func() {
+		solrCloud = generateBaseSolrCloud(1)
+	})
+
+	JustBeforeEach(func(ctx context.Context) {
+		By("creating the SolrCloud")
+		Expect(k8sClient.Create(ctx, solrCloud)).To(Succeed())
+
+		DeferCleanup(func(ctx context.Context) {
+			cleanupTest(ctx, solrCloud)
+		})
+
+		By("Waiting for the SolrCloud to come up healthy")
+		solrCloud = expectSolrCloudWithChecks(ctx, solrCloud, func(g Gomega, found *solrv1beta1.SolrCloud) {
+			g.Expect(found.Status.ReadyReplicas).To(Equal(*found.Spec.Replicas), "The SolrCloud should have all nodes come up healthy")
+		})
+
+		By("creating a first Solr Collection")
+		createAndQueryCollection(solrCloud, solrCollection1, 1, 2)
+
+		By("creating a second Solr Collection")
+		createAndQueryCollection(solrCloud, solrCollection2, 2, 1)
+	})
+
+	FContext("Provided Zookeeper", func() {
+		BeforeEach(func() {
+			solrCloud.Spec.ZookeeperRef = &solrv1beta1.ZookeeperRef{
+				ProvidedZookeeper: &solrv1beta1.ZookeeperSpec{
+					Replicas:  pointer.Int32(1),
+					Ephemeral: &solrv1beta1.ZKEphemeral{},
+				},
+			}
+		})
+
+	// All testing will be done in the "JustBeforeEach" logic, so no additional tests are required here
+		FIt("Starts correctly", func(ctx context.Context) {})
+	})
+})
diff --git a/tests/e2e/solrcloud_rolling_upgrade_test.go b/tests/e2e/solrcloud_rolling_upgrade_test.go
index f261b44..c9256ff 100644
--- a/tests/e2e/solrcloud_rolling_upgrade_test.go
+++ b/tests/e2e/solrcloud_rolling_upgrade_test.go
@@ -22,12 +22,8 @@
 	solrv1beta1 "github.com/apache/solr-operator/api/v1beta1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"strings"
 )
 
 var _ = FDescribe("E2E - SolrCloud - Rolling Upgrades", func() {
@@ -40,43 +36,17 @@
 	)
 
 	BeforeEach(func() {
-		solrCloud = &solrv1beta1.SolrCloud{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "foo",
-				Namespace: testNamespace(),
-			},
-			Spec: solrv1beta1.SolrCloudSpec{
-				Replicas: &three,
-				SolrImage: &solrv1beta1.ContainerImage{
-					Repository: strings.Split(solrImage, ":")[0],
-					Tag:        strings.Split(solrImage+":", ":")[1],
-					PullPolicy: corev1.PullIfNotPresent,
-				},
-				ZookeeperRef: &solrv1beta1.ZookeeperRef{
-					ProvidedZookeeper: &solrv1beta1.ZookeeperSpec{
-						Replicas:  &one,
-						Ephemeral: &solrv1beta1.ZKEphemeral{},
-					},
-				},
-				SolrJavaMem: "-Xms512m -Xmx512m",
-				CustomSolrKubeOptions: solrv1beta1.CustomSolrKubeOptions{
-					PodOptions: &solrv1beta1.PodOptions{
-						Resources: corev1.ResourceRequirements{
-							Requests: corev1.ResourceList{
-								corev1.ResourceMemory: resource.MustParse("600Mi"),
-								corev1.ResourceCPU:    resource.MustParse("1"),
-							},
-						},
-					},
-				},
-			},
-		}
+		solrCloud = generateBaseSolrCloud(3)
 	})
 
 	JustBeforeEach(func(ctx context.Context) {
 		By("creating the SolrCloud")
 		Expect(k8sClient.Create(ctx, solrCloud)).To(Succeed())
 
+		DeferCleanup(func(ctx context.Context) {
+			cleanupTest(ctx, solrCloud)
+		})
+
 		By("Waiting for the SolrCloud to come up healthy")
 		solrCloud = expectSolrCloudWithChecks(ctx, solrCloud, func(g Gomega, found *solrv1beta1.SolrCloud) {
 			g.Expect(found.Status.ReadyReplicas).To(Equal(*found.Spec.Replicas), "The SolrCloud should have all nodes come up healthy")
@@ -89,10 +59,6 @@
 		createAndQueryCollection(solrCloud, solrCollection2, 2, 1)
 	})
 
-	AfterEach(func(ctx context.Context) {
-		cleanupTest(ctx, solrCloud)
-	})
-
 	FContext("Managed Update - Ephemeral Data - Slow", func() {
 		BeforeEach(func() {
 			one := intstr.FromInt(1)
diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go
index d16c9e0..14eeb3d 100644
--- a/tests/e2e/suite_test.go
+++ b/tests/e2e/suite_test.go
@@ -25,9 +25,6 @@
 	"github.com/go-logr/logr"
 	"github.com/onsi/ginkgo/v2/types"
 	zkApi "github.com/pravega/zookeeper-operator/api/v1beta1"
-	"helm.sh/helm/v3/pkg/release"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
@@ -49,14 +46,18 @@
 	solrImageEnv     = "SOLR_IMAGE"
 
 	backupDirHostPath = "/tmp/backup"
+
+	// Shared test timeouts, durations, and polling intervals
+	timeout  = time.Second * 180
+	duration = time.Millisecond * 500
+	interval = time.Millisecond * 250
 )
 
 var (
-	solrOperatorRelease *release.Release
-	k8sClient           client.Client
-	rawK8sClient        *kubernetes.Clientset
-	k8sConfig           *rest.Config
-	logger              logr.Logger
+	k8sClient    client.Client
+	rawK8sClient *kubernetes.Clientset
+	k8sConfig    *rest.Config
+	logger       logr.Logger
 
 	defaultOperatorImage = "apache/solr-operator:" + version.FullVersion()
 	defaultSolrImage     = "solr:8.11"
@@ -73,20 +74,34 @@
 }
 
 var _ = SynchronizedBeforeSuite(func(ctx context.Context) {
+	// Define testing timeouts/durations and intervals.
+	SetDefaultEventuallyTimeout(timeout)
+	SetDefaultEventuallyPollingInterval(interval)
+
+	logger = zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))
+	logf.SetLogger(logger)
+
 	// Run this once before all tests, not per-test-process
 	By("starting the test solr operator")
-	solrOperatorRelease = runSolrOperator(ctx)
+	solrOperatorRelease := runSolrOperator(ctx)
 	Expect(solrOperatorRelease).ToNot(BeNil())
+
+	var err error
+	k8sConfig, err = config.GetConfig()
+	Expect(err).NotTo(HaveOccurred(), "Could not load in default kubernetes config")
+	Expect(zkApi.AddToScheme(scheme.Scheme)).To(Succeed())
+	k8sClient, err = client.New(k8sConfig, client.Options{Scheme: scheme.Scheme})
+	Expect(err).NotTo(HaveOccurred(), "Could not create controllerRuntime Kubernetes client")
+
+	// Set up a shared Zookeeper Cluster to be used by most SolrClouds.
+	// This significantly speeds up the tests.
+	By("starting a shared zookeeper cluster")
+	runSharedZookeeperCluster(ctx)
 }, func(ctx context.Context) {
 	// Run these in each parallel test process before the tests
 	rand.Seed(GinkgoRandomSeed() + int64(GinkgoParallelProcess()))
 
 	// Define testing timeouts/durations and intervals.
-	const (
-		timeout  = time.Second * 180
-		duration = time.Millisecond * 500
-		interval = time.Millisecond * 250
-	)
 	SetDefaultConsistentlyDuration(duration)
 	SetDefaultConsistentlyPollingInterval(interval)
 	SetDefaultEventuallyTimeout(timeout)
@@ -109,30 +124,8 @@
 	k8sClient, err = client.New(k8sConfig, client.Options{Scheme: scheme.Scheme})
 	Expect(err).NotTo(HaveOccurred(), "Could not create controllerRuntime Kubernetes client")
 
-	// Delete the testing namespace if it already exists, then recreate it below
-	namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace()}}
-	err = k8sClient.Get(ctx, client.ObjectKey{Name: testNamespace()}, namespace)
-	if err == nil {
-		By("deleting the existing namespace for this parallel test process before recreating it")
-		deleteAndWait(ctx, namespace)
-	}
-
 	By("creating a namespace for this parallel test process")
-	Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace()}})).
-		To(Succeed(), "Failed to create testing namespace %s", testNamespace())
-})
-
-var _ = SynchronizedAfterSuite(func(ctx context.Context) {
-	// Run these in each parallel test process after the tests
-	By("deleting the namespace for this parallel test process")
-	Expect(k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace()}}, client.PropagationPolicy(metav1.DeletePropagationForeground))).
-		To(Or(Succeed(), MatchError(HaveSuffix("%q not found", testNamespace()))), "Failed to delete testing namespace %s", testNamespace())
-}, func() {
-	// Run this once after all tests, not per-test-process
-	if solrOperatorRelease != nil {
-		By("tearing down the test solr operator")
-		stopSolrOperator(solrOperatorRelease)
-	}
+	createOrRecreateNamespace(ctx, testNamespace())
 })
 
 type RetryCommand struct {
diff --git a/tests/e2e/test_utils_test.go b/tests/e2e/test_utils_test.go
index ee503a2..c3a262e 100644
--- a/tests/e2e/test_utils_test.go
+++ b/tests/e2e/test_utils_test.go
@@ -27,15 +27,20 @@
 	"github.com/apache/solr-operator/controllers/util/solr_api"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	zkApi "github.com/pravega/zookeeper-operator/api/v1beta1"
 	"helm.sh/helm/v3/pkg/action"
 	"helm.sh/helm/v3/pkg/chart/loader"
 	"helm.sh/helm/v3/pkg/cli"
 	"helm.sh/helm/v3/pkg/release"
 	"helm.sh/helm/v3/pkg/storage/driver"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/client-go/tools/remotecommand"
+	"k8s.io/utils/pointer"
 	"os"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"strings"
@@ -47,6 +52,10 @@
 
 	solrOperatorReleaseName      = "solr-operator"
 	solrOperatorReleaseNamespace = "solr-operator"
+
+	sharedZookeeperName             = "shared"
+	sharedZookeeperNamespace        = "zk"
+	sharedZookeeperConnectionString = sharedZookeeperName + "-client." + sharedZookeeperNamespace + ":2181"
 )
 
 var (
@@ -66,9 +75,9 @@
 	Expect(found).To(BeTrue(), "Invalid Operator image found in envVar OPERATOR_IMAGE: "+operatorImage)
 	operatorValues := map[string]interface{}{
 		"image": map[string]interface{}{
-			"repostitory": operatorRepo,
-			"tag":         operatorTag,
-			"pullPolicy":  "Never",
+			"repository": operatorRepo,
+			"tag":        operatorTag,
+			"pullPolicy": "Never",
 		},
 	}
 
@@ -99,6 +108,11 @@
 	Expect(err).ToNot(HaveOccurred(), "Failed to install solr-operator via Helm chart")
 	Expect(solrOperatorHelmRelease).ToNot(BeNil(), "Failed to install solr-operator via Helm chart")
 
+	DeferCleanup(func(ctx context.Context) {
+		By("tearing down the test solr operator")
+		stopSolrOperator(solrOperatorHelmRelease)
+	})
+
 	return solrOperatorHelmRelease
 }
 
@@ -113,7 +127,46 @@
 	Expect(err).ToNot(HaveOccurred(), "Failed to uninstall solr-operator release: "+release.Name)
 }
 
+// Run a Zookeeper Cluster to be used for most of the SolrClouds across the e2e tests.
+// This will speed up the tests considerably, as each SolrCloud does not need to wait on Zookeeper to become available.
+func runSharedZookeeperCluster(ctx context.Context) *zkApi.ZookeeperCluster {
+	createOrRecreateNamespace(ctx, sharedZookeeperNamespace)
+
+	zookeeper := &zkApi.ZookeeperCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      sharedZookeeperName,
+			Namespace: sharedZookeeperNamespace,
+		},
+		Spec: zkApi.ZookeeperClusterSpec{
+			Replicas:    one,
+			StorageType: "Ephemeral",
+		},
+	}
+
+	By("creating the shared ZK cluster")
+	Expect(k8sClient.Create(ctx, zookeeper)).To(Succeed(), "Failed to create shared Zookeeper Cluster")
+
+	By("Waiting for the Zookeeper to come up healthy")
+	zookeeper = expectZookeeperClusterWithChecks(ctx, zookeeper, zookeeper.Name, func(g Gomega, found *zkApi.ZookeeperCluster) {
+		g.Expect(found.Status.ReadyReplicas).To(Equal(found.Spec.Replicas), "The ZookeeperCluster should have all nodes come up healthy")
+	})
+
+	DeferCleanup(func(ctx context.Context) {
+		By("tearing down the shared Zookeeper Cluster")
+		stopSharedZookeeperCluster(ctx, zookeeper)
+	})
+
+	return zookeeper
+}
+
 // Run Solr Operator for e2e testing of resources
+func stopSharedZookeeperCluster(ctx context.Context, sharedZk *zkApi.ZookeeperCluster) {
+	err := k8sClient.Get(ctx, client.ObjectKey{Namespace: sharedZk.Namespace, Name: sharedZk.Name}, sharedZk)
+	if err == nil {
+		Expect(k8sClient.Delete(ctx, sharedZk)).To(Succeed(), "Failed to delete shared Zookeeper Cluster")
+	}
+}
+
 func getEnvWithDefault(envVar string, defaultValue string) string {
 	value := os.Getenv(envVar)
 	if value == "" {
@@ -122,13 +175,42 @@
 	return value
 }
 
-func createAndQueryCollection(solrCloud *solrv1beta1.SolrCloud, collection string, shards int, replicasPerShard int) {
-	createAndQueryCollectionWithGomega(solrCloud, collection, shards, replicasPerShard, Default)
+// Delete the given namespace if it already exists, then create it fresh
+func createOrRecreateNamespace(ctx context.Context, namespaceName string) {
+	namespace := &corev1.Namespace{}
+	err := k8sClient.Get(ctx, client.ObjectKey{Name: namespaceName}, namespace)
+	if err == nil {
+		By("deleting the existing namespace " + namespaceName)
+		deleteAndWait(ctx, namespace)
+	}
+
+	namespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}}
+	Expect(k8sClient.Create(ctx, namespace)).To(Succeed(), "Failed to create namespace %s", namespaceName)
+
+	DeferCleanup(func(ctx context.Context) {
+		By("tearing down the testing namespace " + namespaceName)
+		Expect(k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}}, client.PropagationPolicy(metav1.DeletePropagationForeground))).
+			To(Or(Succeed(), MatchError(HaveSuffix("%q not found", namespaceName))), "Failed to delete testing namespace %s", namespaceName)
+	})
 }
 
-func createAndQueryCollectionWithGomega(solrCloud *solrv1beta1.SolrCloud, collection string, shards int, replicasPerShard int, g Gomega) {
+func createAndQueryCollection(solrCloud *solrv1beta1.SolrCloud, collection string, shards int, replicasPerShard int, nodes ...int) {
+	createAndQueryCollectionWithGomega(solrCloud, collection, shards, replicasPerShard, Default, nodes...)
+}
+
+func createAndQueryCollectionWithGomega(solrCloud *solrv1beta1.SolrCloud, collection string, shards int, replicasPerShard int, g Gomega, nodes ...int) {
 	pod := solrCloud.GetAllSolrPodNames()[0]
 	asyncId := fmt.Sprintf("create-collection-%s-%d-%d", collection, shards, replicasPerShard)
+
+	var nodeSet []string
+	for _, node := range nodes {
+		nodeSet = append(nodeSet, util.SolrNodeName(solrCloud, solrCloud.GetSolrPodName(node)))
+	}
+	createNodeSet := ""
+	if len(nodeSet) > 0 {
+		createNodeSet = "&createNodeSet=" + strings.Join(nodeSet, ",")
+	}
+
 	response, err := runExecForContainer(
 		util.SolrNodeContainer,
 		pod,
@@ -136,11 +218,12 @@
 		[]string{
 			"curl",
 			fmt.Sprintf(
-				"http://localhost:%d/solr/admin/collections?action=CREATE&name=%s&replicationFactor=%d&numShards=%d&async=%s",
+				"http://localhost:%d/solr/admin/collections?action=CREATE&name=%s&replicationFactor=%d&numShards=%d%s&async=%s",
 				solrCloud.Spec.SolrAddressability.PodPort,
 				collection,
 				replicasPerShard,
 				shards,
+				createNodeSet,
 				asyncId),
 		},
 	)
@@ -343,3 +426,40 @@
 
 	return stdout.String(), err
 }
+
+func generateBaseSolrCloud(replicas int) *solrv1beta1.SolrCloud {
+	return &solrv1beta1.SolrCloud{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "foo",
+			Namespace: testNamespace(),
+		},
+		Spec: solrv1beta1.SolrCloudSpec{
+			Replicas: pointer.Int32(int32(replicas)),
+			// Set the image to reflect the inputs given via EnvVars.
+			SolrImage: &solrv1beta1.ContainerImage{
+				Repository: strings.Split(solrImage, ":")[0],
+				Tag:        strings.Split(solrImage+":", ":")[1],
+				PullPolicy: corev1.PullIfNotPresent,
+			},
+			// Use the shared Zookeeper by default, with a unique chRoot for this test
+			ZookeeperRef: &solrv1beta1.ZookeeperRef{
+				ConnectionInfo: &solrv1beta1.ZookeeperConnectionInfo{
+					InternalConnectionString: sharedZookeeperConnectionString,
+					ChRoot:                   "/" + rand.String(5),
+				},
+			},
+			// This seems to be the lowest memory & CPU that allow the tests to pass
+			SolrJavaMem: "-Xms512m -Xmx512m",
+			CustomSolrKubeOptions: solrv1beta1.CustomSolrKubeOptions{
+				PodOptions: &solrv1beta1.PodOptions{
+					Resources: corev1.ResourceRequirements{
+						Requests: corev1.ResourceList{
+							corev1.ResourceMemory: resource.MustParse("600Mi"),
+							corev1.ResourceCPU:    resource.MustParse("1"),
+						},
+					},
+				},
+			},
+		},
+	}
+}