-
Notifications
You must be signed in to change notification settings - Fork 29k
[SPARK-13891] [SQL] [TEST] Issue Exceptions when Hitting the Max Iteration Limit in Optimizer and Analyzer #11714
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
01e4cdf
6835704
9180687
b38a21e
d2b84af
fda8025
ac0dccd
6e0018b
0546772
b37a64f
c2a872c
ab6dbd7
4276356
2dab708
0458770
1debdfa
763706d
4de6ec1
9422a4f
52bdf48
1e95df3
fab24cf
8b2e33b
2ee1876
b9f0090
ade6f7e
9fd63d2
5199d49
404214c
c001dd9
59daa48
41d5f64
472a6e3
0fba10a
797aabb
98cab44
f351362
1281f36
23663fe
cbf73b3
c08f561
474df88
23a6cfc
48b2c32
1a59fcf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
- Loading branch information
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -21,7 +21,8 @@ import scala.collection.JavaConverters._ | |
|
|
||
| import com.google.common.util.concurrent.AtomicLongMap | ||
|
|
||
| import org.apache.spark.{Logging, SparkException} | ||
| import org.apache.spark.Logging | ||
| import org.apache.spark.sql.catalyst.errors.TreeNodeException | ||
| import org.apache.spark.sql.catalyst.trees.TreeNode | ||
| import org.apache.spark.sql.catalyst.util.sideBySide | ||
| import org.apache.spark.util.Utils | ||
|
|
@@ -51,18 +52,18 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging { | |
| */ | ||
| abstract class Strategy { | ||
| def maxIterations: Int | ||
| def throws: Boolean | ||
| def throwsExceptionUponMaxIterations: Boolean | ||
| } | ||
|
|
||
| /** A strategy that only runs once. */ | ||
| case object Once extends Strategy { | ||
| val maxIterations = 1 | ||
| val throws = false | ||
| override val maxIterations = 1 | ||
| override val throwsExceptionUponMaxIterations = false | ||
| } | ||
|
|
||
| /** A strategy that runs until fix point or maxIterations times, whichever comes first. */ | ||
| case class FixedPoint(maxIterations: Int) extends Strategy { | ||
| override val throws: Boolean = if (Utils.isTesting) true else false | ||
| override val throwsExceptionUponMaxIterations: Boolean = if (Utils.isTesting) true else false | ||
|
||
| } | ||
| /** A batch of rules. */ | ||
| protected case class Batch(name: String, strategy: Strategy, rules: Rule[TreeType]*) | ||
|
|
@@ -108,7 +109,11 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging { | |
| // Only log if this is a rule that is supposed to run more than once. | ||
| if (iteration != 2) { | ||
| val msg = s"Max iterations (${iteration - 1}) reached for batch ${batch.name}" | ||
| if (batch.strategy.throws) throw new SparkException(msg) else logTrace(msg) | ||
| if (batch.strategy.throwsExceptionUponMaxIterations) { | ||
| throw new TreeNodeException(curPlan, msg, null) | ||
| } else { | ||
| logTrace(msg) | ||
| } | ||
| } | ||
| continue = false | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -26,6 +26,9 @@ import org.apache.spark.sql.catalyst.util._ | |
| * Provides helper methods for comparing plans. | ||
| */ | ||
| abstract class PlanTest extends SparkFunSuite with PredicateHelper { | ||
|
|
||
| System.setProperty("spark.testing", "true") | ||
|
||
|
|
||
| /** | ||
| * Since attribute references are given globally unique ids during analysis, | ||
| * we must normalize them to check if two different queries are identical. | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Isn't this kind of janky to insert a field into the non-test code just to get the behavior you want in a test? how about mocking subclasses?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@srowen This is used in multiple components, e.g.,
`Analyzer` and `Optimizer`. I am not sure how to do it in a more efficient way. Could you give me some hints on how we did this in Spark? Any example in the existing code base? Thanks!