Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Address comments
  • Loading branch information
xuanyuanking committed Jan 9, 2019
commit 4868e82256c08679e081dd9e92d5454056686de8
10 changes: 7 additions & 3 deletions python/pyspark/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -493,9 +493,13 @@ def getStart(split):
return start0 + int((split * size / numSlices)) * step

def f(split, iterator):
    # The iterator is expected to be empty here, but materializing it with
    # list() forces control to enter FramedSerializer.load_stream (a
    # generator) at least once, which triggers its signal handling — e.g.
    # SpecialLengths.END_OF_DATA_SECTION in _read_with_length — thus making
    # sure worker reuse takes effect. See SPARK-26549 for details.
    assert len(list(iterator)) == 0
    return xrange(getStart(split), getStart(split + 1), step)

Expand Down
17 changes: 9 additions & 8 deletions python/pyspark/tests/test_worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@

from py4j.protocol import Py4JJavaError

from pyspark.testing.utils import ReusedPySparkTestCase, QuietTest
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest

if sys.version_info[0] >= 3:
xrange = range
Expand Down Expand Up @@ -144,14 +144,15 @@ def test_with_different_versions_of_python(self):
finally:
self.sc.pythonVer = version


class WorkerReuseTest(PySparkTestCase):
    """Regression test for SPARK-26549: Python workers must be reused
    across jobs when spark.python.worker.reuse is enabled (the default),
    including for RDDs created via parallelize(xrange(...)).
    """

    def test_reuse_worker_of_parallelize_xrange(self):
        # Run the same job twice on one RDD and record the worker PIDs used
        # by each run. With worker reuse in effect, every PID seen in the
        # second run must come from the pool created by the first run.
        rdd = self.sc.parallelize(xrange(20), 8)
        previous_pids = rdd.map(lambda x: os.getpid()).collect()
        current_pids = rdd.map(lambda x: os.getpid()).collect()
        for pid in current_pids:
            self.assertTrue(pid in previous_pids)


if __name__ == "__main__":
Expand Down