@@ -136,7 +136,7 @@ public HoodieTableSource(
136136 List <String > partitionKeys ,
137137 String defaultPartName ,
138138 Configuration conf ) {
139- this (schema , path , partitionKeys , defaultPartName , conf , null , null , null , null );
139+ this (schema , path , partitionKeys , defaultPartName , conf , null , null , null , null , null );
140140 }
141141
142142 public HoodieTableSource (
@@ -148,7 +148,8 @@ public HoodieTableSource(
148148 @ Nullable List <Map <String , String >> requiredPartitions ,
149149 @ Nullable int [] requiredPos ,
150150 @ Nullable Long limit ,
151- @ Nullable List <ResolvedExpression > filters ) {
151+ @ Nullable List <ResolvedExpression > filters ,
152+ @ Nullable HoodieTableMetaClient metaClient ) {
152153 this .schema = schema ;
153154 this .tableRowType = (RowType ) schema .toPhysicalRowDataType ().notNull ().getLogicalType ();
154155 this .path = path ;
@@ -162,7 +163,7 @@ public HoodieTableSource(
162163 this .limit = limit == null ? NO_LIMIT_CONSTANT : limit ;
163164 this .filters = filters == null ? Collections .emptyList () : filters ;
164165 this .hadoopConf = HadoopConfigurations .getHadoopConf (conf );
165- this .metaClient = StreamerUtil .metaClientForReader (conf , hadoopConf );
166+ this .metaClient = metaClient == null ? StreamerUtil .metaClientForReader (conf , hadoopConf ) : metaClient ;
166167 this .fileIndex = FileIndex .instance (this .path , this .conf , this .tableRowType );
167168 this .maxCompactionMemoryInBytes = StreamerUtil .getMaxCompactionMemoryInBytes (conf );
168169 }
@@ -212,7 +213,7 @@ public ChangelogMode getChangelogMode() {
212213 @ Override
213214 public DynamicTableSource copy () {
214215 return new HoodieTableSource (schema , path , partitionKeys , defaultPartName ,
215- conf , requiredPartitions , requiredPos , limit , filters );
216+ conf , requiredPartitions , requiredPos , limit , filters , metaClient );
216217 }
217218
218219 @ Override
0 commit comments