<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.record.compiler.generated" />
</Match>
<Match>
<Package name="org.apache.hadoop.hdfs.protocol.proto" />
</Match>
<Match>
<Package name="org.apache.hadoop.hdfs.server.namenode.ha.proto" />
</Match>
<Match>
<Class name="~org.apache.hadoop.hdfs.server.namenode.FsImageProto.*" />
</Match>
<Match>
<Package name="org.apache.hadoop.hdfs.qjournal.protocol" />
</Match>
<Match>
<Bug pattern="EI_EXPOSE_REP" />
</Match>
<Match>
<Bug pattern="EI_EXPOSE_REP2" />
</Match>
<Match>
<Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
</Match>
<Match>
<Bug pattern="SE_BAD_FIELD" />
</Match>
<Match>
<Class name="~.*_jsp" />
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<Match>
<Field name="_jspx_dependants" />
<Bug pattern="UWF_UNWRITTEN_FIELD" />
</Match>
<!--
Inconsistent synchronization for Client.Connection.out is
intentional so that a connection can be closed instantly.
-->
<Match>
<Class name="org.apache.hadoop.ipc.Client$Connection" />
<Field name="out" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
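<!--
Illustrative sketch only (a hypothetical class, not Hadoop's actual Client code)
of the access pattern behind the IS2_INCONSISTENT_SYNC suppression above: the
field is normally guarded by a lock, but one path deliberately skips the lock
so the close takes effect immediately.

  import java.io.DataOutputStream;
  import java.io.IOException;

  class Connection {
    private DataOutputStream out;   // normally accessed while holding 'this'

    Connection(DataOutputStream out) { this.out = out; }

    synchronized void writeRequest(byte[] data) throws IOException {
      out.write(data);
    }

    // Intentionally unsynchronized so the close happens right away, even if
    // a writer is blocked while holding the lock; FindBugs reports
    // IS2_INCONSISTENT_SYNC for this mixed access.
    void closeNow() throws IOException {
      out.close();
    }
  }
-->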
<!--
Ignore cross-site scripting (XSS) vulnerabilities.
We have an input quoting filter that protects us.
-->
<Match>
<Bug code="XSS" />
</Match>
<Match>
<Bug code="HRS" />
</Match>
<!--
Core changes: suppress mutable static (MS) warnings across Hadoop classes.
-->
<Match>
<Class name="~org.apache.hadoop.*" />
<Bug code="MS" />
</Match>
<!--
getTmpInputStreams is essentially a stream constructor. The newly created
streams are not supposed to be closed in the constructor, so ignore the
OBL warning.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl" />
<Method name="getTmpInputStreams" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
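<!--
Illustrative sketch only (hypothetical names, not FsDatasetImpl's real API) of
the shape that OBL_UNSATISFIED_OBLIGATION flags in getTmpInputStreams: a
factory method opens streams and returns them, and the caller, not the
factory, is expected to close them.

  import java.io.FileInputStream;
  import java.io.IOException;

  class StreamFactory {
    // Acts like a stream constructor: the streams are handed back open, so
    // closing them here would defeat the purpose and the OBL warning is a
    // false positive.
    FileInputStream[] openTmpStreams(String blockFile, String metaFile)
        throws IOException {
      return new FileInputStream[] {
          new FileInputStream(blockFile),
          new FileInputStream(metaFile)
      };
    }
  }
-->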
<!--
This class exposes stream constructors. The newly created streams are not
supposed to be closed in the constructor. Ignore the OBL warning.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.FileIoProvider$WrappedFileOutputStream" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<!--
This class exposes stream constructors. The newly created streams are not
supposed to be closed in the constructor. Ignore the OBL warning.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.FileIoProvider$WrappedFileInputStream" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<!--
This class exposes stream constructors. The newly created streams are not
supposed to be closed in the constructor. Ignore the OBL warning.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.FileIoProvider$WrappedRandomAccessFile" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<!--
lastAppliedTxId is deliberately left unsynchronized in the BackupNode in a
couple of places. See the comments in BackupImage for justification.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSImage" />
<Field name="lastAppliedTxId" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
Findbugs doesn't realize that closing a FilterOutputStream pushes the close down to
wrapped streams, too.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Saver" />
<Method name="save" />
<Bug pattern="OS_OPEN_STREAM" />
</Match>
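<!--
Standalone example (not the FSImageFormat$Saver code itself) of why
OS_OPEN_STREAM is a false positive above: FilterOutputStream.close() closes
the stream it wraps, so only the outermost stream needs an explicit close.

  import java.io.DataOutputStream;
  import java.io.FileOutputStream;
  import java.io.IOException;

  class ClosePropagation {
    static void save(String path, long txid) throws IOException {
      FileOutputStream fos = new FileOutputStream(path); // flagged as never closed
      DataOutputStream dos = new DataOutputStream(fos);
      try {
        dos.writeLong(txid);
      } finally {
        dos.close(); // DataOutputStream extends FilterOutputStream and closes fos too
      }
    }
  }
-->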
<!--
The 'metrics' member is sometimes used from synchronized blocks and sometimes
not, but it is only reset by test cases, so this is fine.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
<Field name="metrics" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
We use a separate lock to protect modifications to journalSet so that
FSEditLog#selectInputStreams does not need to be a synchronized method.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
<Field name="journalSet" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
FSEditLog#getTotalSyncCount is not synchronized because the method is used by
metrics. A NullPointerException can occur there and is ignored.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
<Field name="editLogStream" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
FSEditLog#isOpenForWriteWithoutLock and FSEditLog#isSegmentOpenWithoutLock
are not synchronized because these methods are used by metrics.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
<Field name="state" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
All of the threads that update or increment txid do so while synchronized,
so txid is kept as a volatile long instead of an AtomicLong.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
<Field name="txid" />
<Bug pattern="VO_VOLATILE_INCREMENT" />
</Match>
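<!--
Hypothetical sketch (not FSEditLog itself) of the pattern behind the
VO_VOLATILE_INCREMENT suppression above: the volatile long is only ever
incremented inside synchronized methods, so the non-atomic read-modify-write
cannot race; volatile is kept only so unsynchronized readers see a fresh value.

  class TxIdCounter {
    private volatile long txid;

    // All writers synchronize, so the increment cannot be lost.
    synchronized long nextTxId() {
      txid += 1;   // this is what FindBugs reports as VO_VOLATILE_INCREMENT
      return txid;
    }

    // Readers (e.g. metrics) rely on volatile visibility without the lock.
    long getLastTxId() {
      return txid;
    }
  }
-->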
<!--
This method isn't performance-critical and is much clearer as written.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.BlockPoolManager" />
<Method name="doRefreshNamenodes" />
<Bug category="PERFORMANCE" />
</Match>
<!-- Don't complain about System.exit() being called from quit() -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext" />
<Method name="quit" />
<Bug pattern="DM_EXIT" />
</Match>
<!-- More complex cleanup logic confuses findbugs -->
<Match>
<Class name="org.apache.hadoop.hdfs.qjournal.server.Journal" />
<Method name="getPersistedPaxosData" />
<Bug pattern="NP_NULL_PARAM_DEREF" />
</Match>
<!-- guava 27.0 update: the @Nullable annotation is present but not detected -->
<Match>
<Class name="org.apache.hadoop.hdfs.qjournal.server.Journal" />
<Method name="persistPaxosData" />
<Bug pattern="NP_NULL_PARAM_DEREF" />
</Match>
<!-- guava 27.0 update: the @Nullable annotation is present but not detected -->
<Match>
<Class name="org.apache.hadoop.hdfs.qjournal.server.JournalNode" />
<Method name="getLogDir" />
<Bug pattern="NP_NULL_PARAM_DEREF" />
</Match>
<!-- guava 27.0 update: redundant null check (the value is asserted to be non-null) -->
<Match>
<Class name="org.apache.hadoop.hdfs.qjournal.server.JournalNode" />
<Method name="getLogDir" />
<Bug pattern="RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE" />
</Match>
<!-- Don't complain about LocalDatanodeInfo's anonymous class -->
<Match>
<Class name="org.apache.hadoop.hdfs.client.impl.BlockReaderLocal$LocalDatanodeInfo$1" />
<Bug pattern="SE_BAD_FIELD_INNER_CLASS" />
</Match>
<!-- Only one method increments numFailedVolumes and it is synchronized -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeList" />
<Field name="numFailedVolumes" />
<Bug pattern="VO_VOLATILE_INCREMENT" />
</Match>
<!-- Access to pendingReceivedRequests is synchronized -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.BPServiceActor" />
<Method name="notifyNamenodeBlockImmediately" />
<Field name="pendingReceivedRequests" />
<Bug pattern="VO_VOLATILE_INCREMENT" />
</Match>
<!-- Replace System.exit() call with ExitUtil.terminate() -->
<Match>
<Class name="org.apache.hadoop.hdfs.tools.JMXGet"/>
<Method name="main" />
<Bug pattern="NP_NULL_ON_SOME_PATH" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo" />
<Method name="setDirInternal" />
<Bug pattern="DM_STRING_CTOR" />
</Match>
<!-- Manually verified to be okay; we want to throw away the top bit here -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.CachedBlock" />
<Method name="getReplication" />
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
</Match>
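<!--
Illustrative only (the field layout below is made up, not CachedBlock's real
encoding): the cast of an unsigned right shift back to short discards the
upper bits, which is exactly what ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT
questions; here the truncation is the point, since only the mark bit is being
dropped.

  class CachedBlockSketch {
    // Low bit is a mark flag, the remaining bits hold the replication count.
    private short replicationAndMark;

    short getReplication() {
      return (short) (replicationAndMark >>> 1);
    }
  }
-->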
<Match>
<Class name="org.apache.hadoop.hdfs.protocol.CacheDirective" />
<Method name="insertInternal" />
<Bug pattern="BC_UNCONFIRMED_CAST" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />
<Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.DFSUtil"/>
<Method name="assertAllResultsEqual" />
<Bug pattern="NP_LOAD_OF_KNOWN_NULL_VALUE" />
</Match>
<!-- Manually verified that the signed byte value involved in the bitwise OR is not negative -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.INodeFile$HeaderFormat" />
<Method name="getBlockLayoutRedundancy" />
<Bug pattern="BIT_IOR_OF_SIGNED_BYTE" />
</Match>
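<!--
Small generic example (not the INodeFile$HeaderFormat code) of the
BIT_IOR_OF_SIGNED_BYTE concern: OR-ing a signed byte into a wider value
sign-extends it, and a negative byte would set every upper bit of the result.
The suppression records that the byte involved was verified to be
non-negative; masking is the usual defensive alternative.

  class ByteOrExample {
    static long packVerifiedNonNegative(long header, byte b) {
      return header | b;           // flagged by BIT_IOR_OF_SIGNED_BYTE
    }

    static long packWithMask(long header, byte b) {
      return header | (b & 0xffL); // safe even if b is negative
    }
  }
-->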
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
<Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
<Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
<Method name="visitFile" />
<Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
</Match>
<!-- Ignore these warnings rather than change the startup option parsing behavior. -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption" />
<Method name="setClusterId" />
<Bug pattern="ME_ENUM_FIELD_SETTER" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption" />
<Method name="setForce" />
<Bug pattern="ME_ENUM_FIELD_SETTER" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption" />
<Method name="setForceFormat" />
<Bug pattern="ME_ENUM_FIELD_SETTER" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption" />
<Method name="setInteractiveFormat" />
<Bug pattern="ME_ENUM_FIELD_SETTER" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.DirectoryScanner" />
<Method name="reconcile" />
<Bug pattern="SWL_SLEEP_WITH_LOCK_HELD" />
</Match>
<!--
conversionPattern is set only once and is used to initialize the PatternLayout
object only once. It is set by the log4j framework when specified in the log4j
properties, and it is accessed only during the first append operation.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.util.AsyncRFAAppender"/>
<Field name="conversionPattern"/>
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
</FindBugsFilter>