Revert "[WIP] Refactor GroupReadSupport to unuse deprecated api (#894)"

Reverting this because it contains backward-incompatible changes.

This reverts commit 48f5195cfb2662f021e928211687192249752818.
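
For reference, a minimal sketch of the kind of downstream caller the refactor breaks (the class name and schema string below are made up for illustration; they are not part of either commit). Such code invokes the deprecated three-argument init directly; once GroupReadSupport only overrides init(InitContext), the call falls back to the base ReadSupport implementation, which is not meant to be called directly and is expected to fail at runtime, whereas after this revert it is handled by GroupReadSupport again:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.parquet.hadoop.api.ReadSupport;
    import org.apache.parquet.hadoop.example.GroupReadSupport;
    import org.apache.parquet.schema.MessageType;
    import org.apache.parquet.schema.MessageTypeParser;

    // Hypothetical external caller, shown only to illustrate the compatibility concern.
    public class OldStyleInitCaller {
      public static void main(String[] args) {
        GroupReadSupport readSupport = new GroupReadSupport();
        Configuration conf = new Configuration();
        Map<String, String> keyValueMetaData = new HashMap<String, String>();
        MessageType fileSchema =
            MessageTypeParser.parseMessageType("message doc { required int64 id; }");

        // Uses the deprecated three-argument signature. This is only handled by
        // GroupReadSupport itself when it overrides that signature, i.e. after this revert.
        ReadSupport.ReadContext context =
            readSupport.init(conf, keyValueMetaData, fileSchema);
        System.out.println(context.getRequestedSchema());
      }
    }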
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java
index 304a1eb..c49b681 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java
@@ -1,4 +1,4 @@
-/*
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * 
  *   http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -24,7 +24,6 @@
 
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
-import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.schema.MessageType;
@@ -32,9 +31,11 @@
 public class GroupReadSupport extends ReadSupport<Group> {
 
   @Override
-  public ReadContext init(InitContext context) {
-    String partialSchemaString = context.getConfiguration().get(ReadSupport.PARQUET_READ_SCHEMA);
-    MessageType requestedProjection = getSchemaForRead(context.getFileSchema(), partialSchemaString);
+  public org.apache.parquet.hadoop.api.ReadSupport.ReadContext init(
+      Configuration configuration, Map<String, String> keyValueMetaData,
+      MessageType fileSchema) {
+    String partialSchemaString = configuration.get(ReadSupport.PARQUET_READ_SCHEMA);
+    MessageType requestedProjection = getSchemaForRead(fileSchema, partialSchemaString);
     return new ReadContext(requestedProjection);
   }
 
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java
index 9d97c57..2a99a1b 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java
@@ -1,4 +1,4 @@
-/*
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * 
  *   http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -18,9 +18,7 @@
  */
 package org.apache.parquet.hadoop.example;
 
-import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.parquet.hadoop.api.InitContext;
 import org.junit.Test;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.schema.MessageType;
@@ -45,10 +43,10 @@
   public void testInitWithoutSpecifyingRequestSchema() throws Exception {
     GroupReadSupport s = new GroupReadSupport();
     Configuration configuration = new Configuration();
-    Map<String, Set<String>> keyValueMetaData = new HashMap<>();
+    Map<String, String> keyValueMetaData = new HashMap<String, String>();
     MessageType fileSchema = MessageTypeParser.parseMessageType(fullSchemaStr);
 
-    ReadSupport.ReadContext context = s.init(new InitContext(configuration, keyValueMetaData, fileSchema));
+    ReadSupport.ReadContext context = s.init(configuration, keyValueMetaData, fileSchema);
     assertEquals(context.getRequestedSchema(), fileSchema);
   }
 
@@ -56,12 +54,12 @@
   public void testInitWithPartialSchema() {
     GroupReadSupport s = new GroupReadSupport();
     Configuration configuration = new Configuration();
-    Map<String, Set<String>> keyValueMetaData = new HashMap<>();
+    Map<String, String> keyValueMetaData = new HashMap<String, String>();
     MessageType fileSchema = MessageTypeParser.parseMessageType(fullSchemaStr);
     MessageType partialSchema = MessageTypeParser.parseMessageType(partialSchemaStr);
     configuration.set(ReadSupport.PARQUET_READ_SCHEMA, partialSchemaStr);
 
-    ReadSupport.ReadContext context = s.init(new InitContext(configuration, keyValueMetaData, fileSchema));
+    ReadSupport.ReadContext context = s.init(configuration, keyValueMetaData, fileSchema);
     assertEquals(context.getRequestedSchema(), partialSchema);
   }
 }
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
index 7de4476..e96b226 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
@@ -28,7 +28,6 @@
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.parquet.column.statistics.*;
-import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.metadata.BlockMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.util.ContextUtil;
@@ -329,7 +328,7 @@
     ParquetMetadata readFooter = ParquetFileReader.readFooter(configuration, parquetFilePath);
     MessageType schema = readFooter.getFileMetaData().getSchema();
 
-    readSupport.init(new InitContext(configuration, null, schema));
+    readSupport.init(configuration, null, schema);
     return new ParquetReader<Group>(parquetFilePath, readSupport);
   }