[SPARK-20172][Core] Add file permission check when listing files in FsHistoryProvider #17495
Changes from 3 commits
core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala

```diff
@@ -27,10 +27,11 @@ import scala.xml.Node

 import com.google.common.io.ByteStreams
 import com.google.common.util.concurrent.{MoreExecutors, ThreadFactoryBuilder}
-import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
+import org.apache.hadoop.fs.{FileStatus, Path}
+import org.apache.hadoop.fs.permission.FsAction
 import org.apache.hadoop.hdfs.DistributedFileSystem
 import org.apache.hadoop.hdfs.protocol.HdfsConstants
-import org.apache.hadoop.security.AccessControlException
+import org.apache.hadoop.security.{AccessControlException, UserGroupInformation}

 import org.apache.spark.{SecurityManager, SparkConf, SparkException}
 import org.apache.spark.deploy.SparkHadoopUtil
```
```diff
@@ -320,14 +321,35 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
       .filter { entry =>
         try {
           val prevFileSize = fileToAppInfo.get(entry.getPath()).map{_.fileSize}.getOrElse(0L)
+
+          def canAccess = {
+            val perm = entry.getPermission
+            val ugi = UserGroupInformation.getCurrentUser
+            val user = ugi.getShortUserName
+            val groups = ugi.getGroupNames
+
+            if (user == entry.getOwner && perm.getUserAction.implies(FsAction.READ)) {
+              true
+            } else if (groups.contains(entry.getGroup) &&
+                perm.getGroupAction.implies(FsAction.READ)) {
+              true
+            } else if (perm.getOtherAction.implies(FsAction.READ)) {
+              true
+            } else {
+              throw new AccessControlException(s"Permission denied: user=$user, " +
+                s"path=${entry.getPath}:${entry.getOwner}:${entry.getGroup}" +
+                s"${if (entry.isDirectory) "d" else "-"}$perm")
+            }
+          }
+
           !entry.isDirectory() &&
             // FsHistoryProvider generates a hidden file which can't be read. Accidentally
             // reading a garbage file is safe, but we would log an error which can be scary to
             // the end-user.
             !entry.getPath().getName().startsWith(".") &&
-            prevFileSize < entry.getLen()
+            prevFileSize < entry.getLen() &&
+            canAccess
         } catch {
-          case e: AccessControlException =>
+          case _: AccessControlException =>
             // Do not use "logInfo" since these messages can get pretty noisy if printed on
             // every poll.
             logDebug(s"No permission to read $entry, ignoring.")
```
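The heavy lifting in `canAccess` is done by Hadoop's `FsPermission`/`FsAction` API, which splits a file's mode bits into per-class actions and tests inclusion with `implies`. A minimal, self-contained illustration of those calls (the permission bits are made up for the example and are not from the patch):

```scala
import org.apache.hadoop.fs.permission.{FsAction, FsPermission}

object FsActionDemo extends App {
  // rw-r----- : owner read/write, group read-only, other none (made-up bits)
  val perm = new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE)

  println(perm.getUserAction.implies(FsAction.READ))   // true:  rw- covers r--
  println(perm.getGroupAction.implies(FsAction.READ))  // true:  r-- covers r--
  println(perm.getOtherAction.implies(FsAction.READ))  // false: --- does not cover r--
}
```

Worth noting: as written, `canAccess` falls through from the owner check to the group and other checks, whereas strict POSIX/HDFS evaluation stops at the first matching class. For a read check on history logs the difference is usually harmless, but it matters when writing tests for edge-case permission bits.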
```diff
@@ -445,7 +467,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
   /**
    * Replay the log files in the list and merge the list of old applications with new ones
    */
-  private def mergeApplicationListing(fileStatus: FileStatus): Unit = {
+  protected def mergeApplicationListing(fileStatus: FileStatus): Unit = {
     val newAttempts = try {
       val eventsFilter: ReplayEventsFilter = { eventString =>
         eventString.startsWith(APPL_START_EVENT_PREFIX) ||
```
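The visibility change on `mergeApplicationListing` (`private` to `protected`) is what lets a test observe the provider's behavior by subclassing. A hypothetical test double, assuming it is declared in the `org.apache.spark.deploy.history` package so it can see the `private[history]` class (the class name and recording logic are illustrative, not part of the patch):

```scala
package org.apache.spark.deploy.history

import scala.collection.mutable.ArrayBuffer

import org.apache.hadoop.fs.FileStatus

import org.apache.spark.SparkConf
import org.apache.spark.util.Clock

// Hypothetical test double: with mergeApplicationListing now `protected`, a
// suite-local subclass can record (or reject) the log files that reach it.
class RecordingHistoryProvider(conf: SparkConf, clock: Clock)
    extends FsHistoryProvider(conf, clock) {

  val mergedLogs = new ArrayBuffer[String]()

  override protected def mergeApplicationListing(fileStatus: FileStatus): Unit = {
    mergedLogs += fileStatus.getPath.getName
    super.mergeApplicationListing(fileStatus)
  }
}
```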
Review comment on canAccess: Return type? Also, I'd make this a top-level method (with the entry and action as parameters), maybe even in SparkHadoopUtil... just to avoid the deeply nested method declaration. That allows you to easily write a unit test for it (yay!).
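A sketch of what the reviewer is asking for: the same checks hoisted into a top-level method that takes the entry and the desired action and returns a Boolean instead of throwing (the name `checkAccessPermission`, the containing object, and the exact signature are assumptions about how the suggestion might be implemented, not part of this diff):

```scala
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.fs.permission.FsAction
import org.apache.hadoop.security.UserGroupInformation

// In the reviewer's suggestion this would live in SparkHadoopUtil; the object
// name here is a stand-in so the sketch compiles on its own.
object AccessCheckSketch {
  // Returns true when the current user may perform `mode` on `status`,
  // using the same owner -> group -> other fall-through as canAccess above.
  def checkAccessPermission(status: FileStatus, mode: FsAction): Boolean = {
    val perm = status.getPermission
    val ugi = UserGroupInformation.getCurrentUser
    (ugi.getShortUserName == status.getOwner && perm.getUserAction.implies(mode)) ||
      (ugi.getGroupNames.contains(status.getGroup) && perm.getGroupAction.implies(mode)) ||
      perm.getOtherAction.implies(mode)
  }
}
```

With that shape, the call site in the filter shrinks to `checkAccessPermission(entry, FsAction.READ)`, and a unit test can exercise the method directly with hand-built FileStatus and FsPermission values, as the reviewer notes.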