some more Akka to Pekko renaming (#129)
* some more Akka to Pekko renaming
* revert some Couchbase changes
diff --git a/cassandra/src/test/resources/logback-test.xml b/cassandra/src/test/resources/logback-test.xml
index f9d204e..d281a6e 100644
--- a/cassandra/src/test/resources/logback-test.xml
+++ b/cassandra/src/test/resources/logback-test.xml
@@ -4,14 +4,14 @@
<file>target/cassandra.log</file>
<append>false</append>
<encoder>
- <pattern>%d{ISO8601} %-5level [%thread] [%logger{36}] [%X{akkaSource}] %msg%n</pattern>
+ <pattern>%d{ISO8601} %-5level [%thread] [%logger{36}] [%X{pekkoSource}] %msg%n</pattern>
</encoder>
</appender>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<target>System.out</target>
<encoder>
- <pattern>%d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} [%X{akkaSource}] %msg%n%rEx</pattern>
+ <pattern>%d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} [%X{pekkoSource}] %msg%n%rEx</pattern>
</encoder>
</appender>
diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala
index 7245751..106ccfa 100644
--- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala
+++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala
@@ -33,7 +33,7 @@
* Scala API: Read the entire contents of a file, and then when the end is reached, keep reading
* newly appended data. Like the unix command `tail -f`.
*
- * Aborting the stage can be done by combining with a [[akka.stream.KillSwitch]]
+ * Aborting the stage can be done by combining with a [[pekko.stream.KillSwitch]]
*
* @param path a file path to tail
* @param maxChunkSize The max emitted size of the `ByteString`s
@@ -53,7 +53,7 @@
*
* If a line is longer than `maxChunkSize` the stream will fail.
*
- * Aborting the stage can be done by combining with a [[akka.stream.KillSwitch]]
+ * Aborting the stage can be done by combining with a [[pekko.stream.KillSwitch]]
*
* @param path a file path to tail
* @param maxLineSize The max emitted size of the `ByteString`s
diff --git a/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala b/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala
index 9708d43..5d8c2fb 100644
--- a/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala
+++ b/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala
@@ -52,12 +52,12 @@
val sources = Source.empty
val zipFlow = Archive.zip()
- val akkaZipped: Future[ByteString] =
+ val pekkoZipped: Future[ByteString] =
sources
.via(zipFlow)
.runWith(Sink.fold(ByteString.empty)(_ ++ _))
- akkaZipped.futureValue shouldBe ByteString.empty
+ pekkoZipped.futureValue shouldBe ByteString.empty
}
"archive file" in {
@@ -113,12 +113,12 @@
val inputStream = filesToStream(inputFiles)
val zipFlow = Archive.zip()
- val akkaZipped: Future[ByteString] =
+ val pekkoZipped: Future[ByteString] =
inputStream
.via(zipFlow)
.runWith(Sink.fold(ByteString.empty)(_ ++ _))
- archiveHelper.unzip(akkaZipped.futureValue).asScala shouldBe inputFiles
+ archiveHelper.unzip(pekkoZipped.futureValue).asScala shouldBe inputFiles
}
"unarchive files" in {
@@ -126,7 +126,7 @@
val inputStream = filesToStream(inputFiles)
val zipFlow = Archive.zip()
- val akkaZipped: Future[ByteString] =
+ val pekkoZipped: Future[ByteString] =
inputStream
.via(zipFlow)
.runWith(Sink.fold(ByteString.empty)(_ ++ _))
@@ -136,7 +136,7 @@
File.createTempFile("pre", "post")
zipFile.deleteOnExit()
- Source.future(akkaZipped).runWith(FileIO.toPath(zipFile.toPath)).futureValue
+ Source.future(pekkoZipped).runWith(FileIO.toPath(zipFile.toPath)).futureValue
Archive
.zipReader(zipFile)
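The assertions above fold the zipped `ByteString`s in memory; the same `Archive.zip()` flow can also stream straight to disk. A small sketch, assuming the connector's `ArchiveMetadata` factory (entry names and contents are made up):

```scala
import java.nio.file.Paths

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.connectors.file.ArchiveMetadata
import org.apache.pekko.stream.connectors.file.scaladsl.Archive
import org.apache.pekko.stream.scaladsl.{ FileIO, Source }
import org.apache.pekko.util.ByteString

object ZipExample extends App {
  implicit val system: ActorSystem = ActorSystem("zip-example")

  // Each element pairs an entry's metadata with a source of its bytes
  val entries = Source(List(
    (ArchiveMetadata("docs/readme.txt"), Source.single(ByteString("hello"))),
    (ArchiveMetadata("docs/notes.txt"), Source.single(ByteString("world")))))

  // Stream the zipped bytes to a file instead of folding them in memory
  val result = entries.via(Archive.zip()).runWith(FileIO.toPath(Paths.get("out.zip")))
}
```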
diff --git a/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala b/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala
index b947400..d2f4997 100644
--- a/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala
+++ b/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala
@@ -55,12 +55,12 @@
val sources = Source.empty
val tarFlow = Archive.tar()
- val akkaTarred: Future[ByteString] =
+ val pekkoTarred: Future[ByteString] =
sources
.via(tarFlow)
.runWith(collectByteString)
- akkaTarred.futureValue shouldBe ByteString.empty
+ pekkoTarred.futureValue shouldBe ByteString.empty
}
"archive file" in {
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala
index 34942a5..7d8118a 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala
@@ -385,7 +385,7 @@
/**
* Loads data into BigQuery via a series of asynchronous load jobs created at the rate [[pekko.stream.connectors.googlecloud.bigquery.BigQuerySettings.loadJobPerTableQuota]].
* @note WARNING: Pending the resolution of [[https://issuetracker.google.com/176002651 BigQuery issue 176002651]] this method may not work as expected.
- * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Akka HTTP v10.2.4 or later.
+ * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Pekko HTTP.
*
* @param datasetId dataset ID of the table to insert into
* @param tableId table ID of the table to insert into
@@ -403,7 +403,7 @@
/**
* Loads data into BigQuery via a series of asynchronous load jobs created at the rate [[pekko.stream.connectors.googlecloud.bigquery.BigQuerySettings.loadJobPerTableQuota]].
* @note WARNING: Pending the resolution of [[https://issuetracker.google.com/176002651 BigQuery issue 176002651]] this method may not work as expected.
- * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Akka HTTP v10.2.4 or later.
+ * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Pekko HTTP.
*
* @param datasetId dataset ID of the table to insert into
* @param tableId table ID of the table to insert into
@@ -422,7 +422,7 @@
/**
* Starts a new asynchronous upload job.
* @note WARNING: Pending the resolution of [[https://issuetracker.google.com/176002651 BigQuery issue 176002651]] this method may not work as expected.
- * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Akka HTTP v10.2.4 or later.
+ * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Pekko HTTP.
* @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert BigQuery reference]]
* @see [[https://cloud.google.com/bigquery/docs/reference/api-uploads BigQuery reference]]
*
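The workaround referenced in these notes is a single Pekko HTTP setting; it can live in `application.conf`, or be layered onto the default config when the actor system is created. A sketch using only Typesafe Config and the standard `ActorSystem` factory (the system name is arbitrary):

```scala
import com.typesafe.config.ConfigFactory
import org.apache.pekko.actor.ActorSystem

object BigQueryWorkaround extends App {
  // Work around BigQuery issue 176002651: keep the first of any conflicting
  // Content-Type headers instead of failing the parse
  val config = ConfigFactory
    .parseString("pekko.http.parsing.conflicting-content-type-header-processing-mode = first")
    .withFallback(ConfigFactory.load())

  implicit val system: ActorSystem = ActorSystem("bigquery-load", config)
}
```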
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala
index f75f2db..69c969e 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala
@@ -86,7 +86,7 @@
/**
* Loads data into BigQuery via a series of asynchronous load jobs created at the rate [[pekko.stream.connectors.googlecloud.bigquery.BigQuerySettings.loadJobPerTableQuota]].
* @note WARNING: Pending the resolution of [[https://issuetracker.google.com/176002651 BigQuery issue 176002651]] this method may not work as expected.
- * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Akka HTTP v10.2.4 or later.
+ * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Pekko HTTP.
*
* @param datasetId dataset ID of the table to insert into
* @param tableId table ID of the table to insert into
@@ -99,7 +99,7 @@
/**
* Loads data into BigQuery via a series of asynchronous load jobs created at the rate [[pekko.stream.connectors.googlecloud.bigquery.BigQuerySettings.loadJobPerTableQuota]].
* @note WARNING: Pending the resolution of [[https://issuetracker.google.com/176002651 BigQuery issue 176002651]] this method may not work as expected.
- * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Akka HTTP v10.2.4 or later.
+ * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Pekko HTTP.
*
* @param datasetId dataset ID of the table to insert into
* @param tableId table ID of the table to insert into
@@ -155,7 +155,7 @@
/**
* Starts a new asynchronous upload job.
* @note WARNING: Pending the resolution of [[https://issuetracker.google.com/176002651 BigQuery issue 176002651]] this method may not work as expected.
- * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Akka HTTP v10.2.4 or later.
+ * As a workaround, you can use the config setting `pekko.http.parsing.conflicting-content-type-header-processing-mode = first` with Pekko HTTP.
* @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert BigQuery reference]]
* @see [[https://cloud.google.com/bigquery/docs/reference/api-uploads BigQuery reference]]
*
diff --git a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala
index fbd0b4b..93159f8 100644
--- a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala
+++ b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala
@@ -145,7 +145,7 @@
shape: FlowShape[immutable.Seq[InfluxDbWriteMessage[T, C]], immutable.Seq[InfluxDbWriteResult[T, C]]])
extends InfluxDbLogic(influxDB, in, out, shape) {
- private val mapperHelper: AlpakkaResultMapperHelper = new AlpakkaResultMapperHelper
+ private val mapperHelper: PekkoConnectorsResultMapperHelper = new PekkoConnectorsResultMapperHelper
override protected def write(messages: immutable.Seq[InfluxDbWriteMessage[T, C]]): Unit =
messages
diff --git a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala
index d360ac5..b86249a 100644
--- a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala
+++ b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala
@@ -55,10 +55,10 @@
shape: SourceShape[T])
extends InfluxDbBaseSourceLogic[T](influxDB, query, outlet, shape) {
- var resultMapperHelper: AlpakkaResultMapperHelper = _
+ var resultMapperHelper: PekkoConnectorsResultMapperHelper = _
override def preStart(): Unit = {
- resultMapperHelper = new AlpakkaResultMapperHelper
+ resultMapperHelper = new PekkoConnectorsResultMapperHelper
resultMapperHelper.cacheClassFields(clazz)
super.preStart()
}
diff --git a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/AlpakkaResultMapperHelper.scala b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala
similarity index 99%
rename from influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/AlpakkaResultMapperHelper.scala
rename to influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala
index 11a94c9..7e3e47a 100644
--- a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/AlpakkaResultMapperHelper.scala
+++ b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala
@@ -32,7 +32,7 @@
* Internal API.
*/
@InternalApi
-private[impl] class AlpakkaResultMapperHelper {
+private[impl] class PekkoConnectorsResultMapperHelper {
val CLASS_FIELD_CACHE: ConcurrentHashMap[String, ConcurrentMap[String, Field]] = new ConcurrentHashMap();
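The renamed helper keeps a per-class field cache; the general idea behind such a cache, sketched here as an assumption rather than the helper's actual code, is to pay the reflection cost only once per class:

```scala
import java.lang.reflect.Field
import java.util.concurrent.{ ConcurrentHashMap, ConcurrentMap }

object FieldCacheSketch {
  // Resolve a class's declared fields once and reuse them, keyed by class name
  private val classFieldCache = new ConcurrentHashMap[String, ConcurrentMap[String, Field]]()

  def fieldsFor(clazz: Class[_]): ConcurrentMap[String, Field] =
    classFieldCache.computeIfAbsent(clazz.getName,
      _ => {
        val fields = new ConcurrentHashMap[String, Field]()
        clazz.getDeclaredFields.foreach { f =>
          f.setAccessible(true)
          fields.put(f.getName, f)
        }
        fields
      })
}
```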
diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala
index 84227ea..67ac785 100644
--- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala
+++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala
@@ -263,7 +263,7 @@
protected def closeConnection(connection: jms.Connection): Unit = {
try {
- // deregister exception listener to clear reference from JMS client to the Akka stage
+ // deregister exception listener to clear reference from JMS client to the Pekko stage
connection.setExceptionListener(null)
} catch {
case _: jms.JMSException => // ignore
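The comment touched here is about breaking the reference the JMS client holds back to the stage before closing. Sketched as a standalone method with plain `javax.jms` calls (the surrounding connector code is not reproduced), the shutdown pattern is roughly:

```scala
import javax.jms

object JmsShutdownSketch {
  def closeConnection(connection: jms.Connection): Unit = {
    try {
      // deregister the exception listener so the JMS client no longer references the Pekko stage
      connection.setExceptionListener(null)
    } catch {
      case _: jms.JMSException => // ignore, we are tearing the connection down anyway
    }
    try connection.close()
    catch {
      case _: jms.JMSException => // the connection may already be closed or broken
    }
  }
}
```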
diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala
index 3e01d5c..d7c8e15 100644
--- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala
+++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala
@@ -21,7 +21,7 @@
import org.apache.pekko
import pekko.Done
import pekko.annotation.InternalApi
-import pekko.japi.{ Pair => AkkaPair }
+import pekko.japi.{ Pair => PekkoPair }
import pekko.stream.connectors.mqtt.streaming.Connect.ProtocolLevel
import pekko.util.ccompat.JavaConverters._
import pekko.util.{ ByteIterator, ByteString, ByteStringBuilder }
@@ -374,7 +374,7 @@
* 3.8 SUBSCRIBE - Subscribe to topics
* http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html
*/
- def this(topicFilters: java.util.List[AkkaPair[String, Integer]]) =
+ def this(topicFilters: java.util.List[PekkoPair[String, Integer]]) =
this(PacketId(0), topicFilters.asScala.toIndexedSeq.map(v => v.first -> ControlPacketFlags(v.second)))
/**
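The renamed `PekkoPair` alias exists to support the Java-facing `Subscribe` constructor shown in this hunk; called from Scala for illustration (the topic filter and QoS value are made up):

```scala
import scala.jdk.CollectionConverters._

import org.apache.pekko.japi.Pair
import org.apache.pekko.stream.connectors.mqtt.streaming.Subscribe

object MqttSubscribeExample {
  // Each pair carries a topic filter and its requested QoS encoded as an Integer,
  // which the constructor converts into ControlPacketFlags
  val subscribe = new Subscribe(
    List(Pair.create("sensors/+/temperature", Int.box(1))).asJava) // QoS 1, at-least-once
}
```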
diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala
index 6788ebd..5d97fc2 100644
--- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala
+++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala
@@ -66,7 +66,7 @@
new CoupledTerminationBidi))
}
-/** INTERNAL API - taken from Akka streams - perhaps it should be made public */
+/** INTERNAL API - taken from Pekko streams - perhaps it should be made public */
private[scaladsl] class CoupledTerminationBidi[I, O] extends GraphStage[BidiShape[I, I, O, O]] {
val in1: Inlet[I] = Inlet("CoupledCompletion.in1")
val out1: Outlet[I] = Outlet("CoupledCompletion.out1")
diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala
index 1bb7f7a..fe7d278 100644
--- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala
+++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala
@@ -112,7 +112,7 @@
/**
* In order to minimise the user facing API, the resource lifetime can be managed by an
- * Akka Extension. In that case Akka Extension will make sure that
+ * Pekko Extension. In that case Pekko Extension will make sure that
* there is only one instance of the resource instantiated per Actor System.
*/
final class ResourceExt private (sys: ExtendedActorSystem) extends Extension {
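The single-instance-per-ActorSystem guarantee mentioned above comes from the Extension mechanism itself; a generic sketch of that mechanism (names are illustrative, this is not the reference connector's `ResourceExt`):

```scala
import org.apache.pekko.actor.{ ExtendedActorSystem, Extension, ExtensionId }

// The extension instance owns the expensive, system-wide resource
final class MyResourceExt(system: ExtendedActorSystem) extends Extension {
  val resource: String = s"client for ${system.name}" // stand-in for a real client
}

// The ExtensionId registers the extension with the ActorSystem; `apply(system)`
// creates it on first use and returns the same instance on every later call
object MyResourceExt extends ExtensionId[MyResourceExt] {
  override def createExtension(system: ExtendedActorSystem): MyResourceExt =
    new MyResourceExt(system)
}

// Usage: MyResourceExt(system).resource — always the same instance per system
```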
diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala
index 07a3328..ab8fc3b 100644
--- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala
+++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala
@@ -32,7 +32,7 @@
}
@InternalApi private[impl] object CanonicalRequest {
- private val akkaSyntheticHeaderNames = List(
+ private val pekkoSyntheticHeaderNames = List(
`Raw-Request-URI`.lowercaseName,
`X-Forwarded-For`.lowercaseName,
`Timeout-Access`.lowercaseName,
@@ -44,7 +44,7 @@
.collectFirst { case header if header.is("x-amz-content-sha256") => header.value }
.getOrElse("")
- val signedHeaders = request.headers.filterNot(header => akkaSyntheticHeaderNames.contains(header.lowercaseName()))
+ val signedHeaders = request.headers.filterNot(header => pekkoSyntheticHeaderNames.contains(header.lowercaseName()))
CanonicalRequest(
request.method.value,
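The list renamed here exists because Pekko HTTP attaches synthetic headers (`Raw-Request-URI`, `X-Forwarded-For`, `Timeout-Access`, ...) that never go on the wire; if they were signed, the SigV4 signature would not match the request AWS actually receives. A rough illustration of deriving the signed-headers string from the filtered headers (not the connector's actual helper; SigV4 wants lowercase names, sorted and joined with `;`):

```scala
import org.apache.pekko.http.scaladsl.model.HttpRequest

object SignedHeadersSketch {
  // Names Pekko HTTP synthesises locally; the actual list in CanonicalRequest
  // continues beyond what this hunk shows
  val syntheticHeaderNames = Set("raw-request-uri", "x-forwarded-for", "timeout-access")

  def signedHeadersString(request: HttpRequest): String =
    request.headers
      .filterNot(h => syntheticHeaderNames.contains(h.lowercaseName()))
      .map(_.lowercaseName())
      .distinct
      .sorted
      .mkString(";")
}
```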
diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala
index 7c7564b..6c30070 100644
--- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala
+++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala
@@ -470,7 +470,7 @@
val request = HttpRequests.bucketManagementRequest(location, method = HttpMethods.PUT)
- // Date is added by akka by default
+ // Date is added by pekko by default
request.uri.authority.host.toString should equal("bucket.s3.us-east-1.amazonaws.com")
request.entity.contentLengthOption should equal(Some(0))
request.uri.queryString() should equal(None)
@@ -482,7 +482,7 @@
val request = HttpRequests.bucketManagementRequest(location, method = HttpMethods.DELETE)
- // Date is added by akka by default
+ // Date is added by pekko by default
request.uri.authority.host.toString should equal("bucket.s3.us-east-1.amazonaws.com")
request.entity.contentLengthOption should equal(Some(0))
request.uri.queryString() should equal(None)
@@ -494,7 +494,7 @@
val request: HttpRequest = HttpRequests.bucketManagementRequest(location, method = HttpMethods.HEAD)
- // Date is added by akka by default
+ // Date is added by pekko by default
request.uri.authority.host.toString should equal("bucket.s3.us-east-1.amazonaws.com")
request.entity.contentLengthOption should equal(Some(0))
request.uri.queryString() should equal(None)
diff --git a/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala b/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala
index e471e64..ad32aba 100644
--- a/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala
+++ b/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala
@@ -74,7 +74,7 @@
*/
def flow[T](
session: SlickSession,
- toStatement: JFunction[T, String] // TODO: or use the akka japi Function2 interface?
+ toStatement: JFunction[T, String] // TODO: or use the pekko japi Function2 interface?
): Flow[T, java.lang.Integer, NotUsed] =
flow(session, 1, toStatement)
@@ -267,7 +267,7 @@
*/
def sink[T](
session: SlickSession,
- toStatement: JFunction[T, String] // TODO: or use the akka japi Function2 interface?
+ toStatement: JFunction[T, String] // TODO: or use the pekko japi Function2 interface?
): Sink[T, CompletionStage[Done]] =
sink(session, 1, toStatement)
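The `flow` overload touched here (and its `sink` counterpart) takes a function from the stream element to a SQL statement string and delegates with parallelism 1. Called from Scala for illustration, with a made-up `User` type, table and config path:

```scala
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.connectors.slick.javadsl.{ Slick, SlickSession }
import org.apache.pekko.stream.scaladsl.{ Sink, Source }

object SlickFlowExample extends App {
  final case class User(id: Int, name: String)

  implicit val system: ActorSystem = ActorSystem("slick-example")
  val session: SlickSession = SlickSession.forConfig("slick-h2") // config path is an assumption

  // Each element is turned into a plain SQL statement; the flow emits the update count
  val done = Source(List(User(1, "alice"), User(2, "bob")))
    .via(Slick.flow[User](session,
      (user: User) => s"INSERT INTO USERS VALUES (${user.id}, '${user.name}')"))
    .runWith(Sink.ignore)
}
```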