public class RewriteManifestsSparkAction extends java.lang.Object implements RewriteManifests
By default, this action rewrites all manifests for the current partition spec and writes the result to the metadata folder. The behavior can be modified by passing a custom predicate to rewriteIf(Predicate) and a custom spec ID to specId(int). In addition, a custom location for staged manifests can be configured via stagingLocation(String).

The provided staging location will be ignored if snapshot ID inheritance is enabled. In such cases, the manifests are always written to the metadata folder and committed without staging.
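For illustration, here is a minimal end-to-end sketch of invoking this action through SparkActions. The table identifier, size threshold, and staging path are assumptions for the example, not values taken from this page. On success, execute() commits the rewritten manifests to the table and returns a RewriteManifests.Result describing the rewritten and added manifests.

```java
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.RewriteManifests;
import org.apache.iceberg.spark.Spark3Util;
import org.apache.iceberg.spark.actions.SparkActions;
import org.apache.spark.sql.SparkSession;

public class RewriteManifestsExample {
  public static void main(String[] args) throws Exception {
    SparkSession spark = SparkSession.builder()
        .appName("rewrite-manifests-example")
        .getOrCreate();

    // Hypothetical table identifier; any Iceberg table reachable from the session's catalogs works.
    Table table = Spark3Util.loadIcebergTable(spark, "db.events");

    RewriteManifests.Result result =
        SparkActions.get(spark)
            .rewriteManifests(table)
            // Only rewrite manifests smaller than 8 MB (example threshold, not a recommendation).
            .rewriteIf((ManifestFile manifest) -> manifest.length() < 8 * 1024 * 1024)
            // Hypothetical staging path; ignored when snapshot ID inheritance is enabled (see above).
            .stagingLocation("s3://bucket/tmp/iceberg-manifest-staging")
            .execute();

    result.rewrittenManifests().forEach(m -> System.out.println("rewritten: " + m.path()));
    result.addedManifests().forEach(m -> System.out.println("added: " + m.path()));

    spark.stop();
  }
}
```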
Nested classes/interfaces inherited from interface org.apache.iceberg.actions.RewriteManifests: RewriteManifests.Result
| Modifier and Type | Field and Description |
|---|---|
| protected static org.apache.iceberg.relocated.com.google.common.base.Joiner | COMMA_JOINER |
| protected static org.apache.iceberg.relocated.com.google.common.base.Splitter | COMMA_SPLITTER |
| protected static java.lang.String | FILE_PATH |
| protected static java.lang.String | LAST_MODIFIED |
| protected static java.lang.String | MANIFEST |
| protected static java.lang.String | MANIFEST_LIST |
| protected static java.lang.String | OTHERS |
| protected static java.lang.String | STATISTICS_FILES |
| static java.lang.String | USE_CACHING |
| static boolean | USE_CACHING_DEFAULT |
| Modifier and Type | Method and Description |
|---|---|
| protected org.apache.spark.sql.Dataset<FileInfo> | allReachableOtherMetadataFileDS(Table table) |
| protected void | commit(SnapshotUpdate<?> update) |
| protected java.util.Map<java.lang.String,java.lang.String> | commitSummary() |
| protected org.apache.spark.sql.Dataset<FileInfo> | contentFileDS(Table table) |
| protected org.apache.spark.sql.Dataset<FileInfo> | contentFileDS(Table table, java.util.Set<java.lang.Long> snapshotIds) |
| protected org.apache.iceberg.spark.actions.BaseSparkAction.DeleteSummary | deleteFiles(java.util.concurrent.ExecutorService executorService, java.util.function.Consumer<java.lang.String> deleteFunc, java.util.Iterator<FileInfo> files) - Deletes files and keeps track of how many files were removed for each file type. |
| protected org.apache.iceberg.spark.actions.BaseSparkAction.DeleteSummary | deleteFiles(SupportsBulkOperations io, java.util.Iterator<FileInfo> files) |
| RewriteManifests.Result | execute() - Executes this action. |
| protected org.apache.spark.sql.Dataset<org.apache.spark.sql.Row> | loadMetadataTable(Table table, MetadataTableType type) |
| protected org.apache.spark.sql.Dataset<FileInfo> | manifestDS(Table table) |
| protected org.apache.spark.sql.Dataset<FileInfo> | manifestDS(Table table, java.util.Set<java.lang.Long> snapshotIds) |
| protected org.apache.spark.sql.Dataset<FileInfo> | manifestListDS(Table table) |
| protected org.apache.spark.sql.Dataset<FileInfo> | manifestListDS(Table table, java.util.Set<java.lang.Long> snapshotIds) |
| protected JobGroupInfo | newJobGroupInfo(java.lang.String groupId, java.lang.String desc) |
| protected Table | newStaticTable(TableMetadata metadata, FileIO io) |
| ThisT | option(java.lang.String name, java.lang.String value) |
| protected java.util.Map<java.lang.String,java.lang.String> | options() |
| ThisT | options(java.util.Map<java.lang.String,java.lang.String> newOptions) |
| protected org.apache.spark.sql.Dataset<FileInfo> | otherMetadataFileDS(Table table) |
| RewriteManifestsSparkAction | rewriteIf(java.util.function.Predicate<ManifestFile> newPredicate) - Rewrites only manifests that match the given predicate. |
| protected RewriteManifestsSparkAction | self() |
| ThisT | snapshotProperty(java.lang.String property, java.lang.String value) |
| protected org.apache.spark.sql.SparkSession | spark() |
| protected org.apache.spark.api.java.JavaSparkContext | sparkContext() |
| RewriteManifestsSparkAction | specId(int specId) - Rewrites manifests for a given spec id. |
| RewriteManifestsSparkAction | stagingLocation(java.lang.String newStagingLocation) - Passes a location where the staged manifests should be written. |
| protected org.apache.spark.sql.Dataset<FileInfo> | statisticsFileDS(Table table, java.util.Set<java.lang.Long> snapshotIds) |
| protected <T> T | withJobGroupInfo(JobGroupInfo info, java.util.function.Supplier<T> supplier) |
Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
public static final java.lang.String USE_CACHING
public static final boolean USE_CACHING_DEFAULT
protected static final java.lang.String MANIFEST
protected static final java.lang.String MANIFEST_LIST
protected static final java.lang.String STATISTICS_FILES
protected static final java.lang.String OTHERS
protected static final java.lang.String FILE_PATH
protected static final java.lang.String LAST_MODIFIED
protected static final org.apache.iceberg.relocated.com.google.common.base.Splitter COMMA_SPLITTER
protected static final org.apache.iceberg.relocated.com.google.common.base.Joiner COMMA_JOINER
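The USE_CACHING flag above is read from the action's options rather than set through a dedicated method, so it is passed via option(String, String); USE_CACHING_DEFAULT applies when the option is absent. Below is a brief sketch that reuses the spark session and table variables from the earlier example; whether caching is beneficial depends on the amount of manifest metadata and available executor memory, which is an assumption on my part rather than guidance from this page.

```java
// Reuses the `spark` session and `table` from the sketch above.
RewriteManifestsSparkAction action =
    SparkActions.get(spark)
        .rewriteManifests(table)
        // Toggle caching of intermediate Datasets during the rewrite; the value is a boolean string.
        .option(RewriteManifestsSparkAction.USE_CACHING, "true");

RewriteManifests.Result result = action.execute();
```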
protected RewriteManifestsSparkAction self()
public RewriteManifestsSparkAction specId(int specId)
Description copied from interface: RewriteManifests
If not set, defaults to the table's default spec ID.
Specified by: specId in interface RewriteManifests
Parameters:
specId - a spec id

public RewriteManifestsSparkAction rewriteIf(java.util.function.Predicate<ManifestFile> newPredicate)
Description copied from interface: RewriteManifests
If not set, all manifests will be rewritten.
Specified by: rewriteIf in interface RewriteManifests
Parameters:
newPredicate - a predicate

public RewriteManifestsSparkAction stagingLocation(java.lang.String newStagingLocation)
Description copied from interface: RewriteManifests
If not set, defaults to the table's metadata location.
Specified by: stagingLocation in interface RewriteManifests
Parameters:
newStagingLocation - a staging location

public RewriteManifests.Result execute()
Description copied from interface: Action
Executes this action.
Specified by: execute in interface Action<RewriteManifests,RewriteManifests.Result>
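To make the configuration methods above concrete, here is a hedged sketch that rewrites only the manifests belonging to an older partition spec and stages the results in a scratch location. The spec ID and staging path are illustrative assumptions, and `spark` and `table` are the variables from the first sketch.

```java
// Rewrite only manifests written with partition spec 0 (hypothetical: the pre-evolution spec),
// staging the new manifests outside the metadata folder. Staging is skipped when snapshot ID
// inheritance is enabled, as described at the top of this page.
RewriteManifests.Result result =
    SparkActions.get(spark)
        .rewriteManifests(table)
        .specId(0)
        .stagingLocation("hdfs://namenode:8020/tmp/manifest-staging")
        .execute();
```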
public ThisT snapshotProperty(java.lang.String property, java.lang.String value)
protected void commit(SnapshotUpdate<?> update)
protected java.util.Map<java.lang.String,java.lang.String> commitSummary()
protected org.apache.spark.sql.SparkSession spark()
protected org.apache.spark.api.java.JavaSparkContext sparkContext()
public ThisT option(java.lang.String name, java.lang.String value)
public ThisT options(java.util.Map<java.lang.String,java.lang.String> newOptions)
protected java.util.Map<java.lang.String,java.lang.String> options()
protected <T> T withJobGroupInfo(JobGroupInfo info, java.util.function.Supplier<T> supplier)
protected JobGroupInfo newJobGroupInfo(java.lang.String groupId, java.lang.String desc)
protected Table newStaticTable(TableMetadata metadata, FileIO io)
protected org.apache.spark.sql.Dataset<FileInfo> contentFileDS(Table table, java.util.Set<java.lang.Long> snapshotIds)
protected org.apache.spark.sql.Dataset<FileInfo> manifestDS(Table table, java.util.Set<java.lang.Long> snapshotIds)
protected org.apache.spark.sql.Dataset<FileInfo> manifestListDS(Table table, java.util.Set<java.lang.Long> snapshotIds)
protected org.apache.spark.sql.Dataset<FileInfo> statisticsFileDS(Table table, java.util.Set<java.lang.Long> snapshotIds)
protected org.apache.spark.sql.Dataset<FileInfo> otherMetadataFileDS(Table table)
protected org.apache.spark.sql.Dataset<FileInfo> allReachableOtherMetadataFileDS(Table table)
protected org.apache.spark.sql.Dataset<org.apache.spark.sql.Row> loadMetadataTable(Table table, MetadataTableType type)
protected org.apache.iceberg.spark.actions.BaseSparkAction.DeleteSummary deleteFiles(java.util.concurrent.ExecutorService executorService, java.util.function.Consumer<java.lang.String> deleteFunc, java.util.Iterator<FileInfo> files)
Deletes files and keeps track of how many files were removed for each file type.
Parameters:
executorService - an executor service to use for parallel deletes
deleteFunc - a delete func
files - an iterator of Spark rows of the structure (path: String, type: String)

protected org.apache.iceberg.spark.actions.BaseSparkAction.DeleteSummary deleteFiles(SupportsBulkOperations io, java.util.Iterator<FileInfo> files)