
Commit e67b80e

HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and INodeFile.HeaderFormat.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1609401 13f79535-47bb-0310-9956-ffa450edef68

1 parent: 36492f0

7 files changed: +121 -66 lines

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Lines changed: 3 additions & 0 deletions

@@ -278,6 +278,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6645. Add test for successive Snapshots between XAttr modifications.
     (Stephen Chu via jing9)
 
+    HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and
+    INodeFile.HeaderFormat. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
Lines changed: 3 additions & 5 deletions

@@ -97,14 +97,12 @@ public final byte[] getLocalNameBytes() {
 
   @Override
   public final String getUserName() {
-    final int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getUser(n);
+    return PermissionStatusFormat.getUser(permission);
   }
 
   @Override
   public final String getGroupName() {
-    final int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getGroup(n);
+    return PermissionStatusFormat.getGroup(permission);
   }
 
   @Override
@@ -114,7 +112,7 @@ public final FsPermission getFsPermission() {
 
   @Override
   public final short getFsPermissionShort() {
-    return (short)PermissionStatusFormat.MODE.retrieve(permission);
+    return PermissionStatusFormat.getMode(permission);
   }
 
   @Override

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
Lines changed: 20 additions & 28 deletions

@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -71,37 +72,29 @@ public static INodeFile valueOf(INode inode, String path, boolean acceptNull)
   }
 
   /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
-  static class HeaderFormat {
-    /** Number of bits for Block size */
-    static final int BLOCKBITS = 48;
-    /** Header mask 64-bit representation */
-    static final long HEADERMASK = 0xffffL << BLOCKBITS;
-    static final long MAX_BLOCK_SIZE = ~HEADERMASK;
-
-    static short getReplication(long header) {
-      return (short) ((header & HEADERMASK) >> BLOCKBITS);
+  static enum HeaderFormat {
+    PREFERRED_BLOCK_SIZE(null, 48, 1),
+    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1);
+
+    private final LongBitFormat BITS;
+
+    private HeaderFormat(LongBitFormat previous, int length, long min) {
+      BITS = new LongBitFormat(name(), previous, length, min);
     }
 
-    static long combineReplication(long header, short replication) {
-      if (replication <= 0) {
-        throw new IllegalArgumentException(
-            "Unexpected value for the replication: " + replication);
-      }
-      return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
+    static short getReplication(long header) {
+      return (short)REPLICATION.BITS.retrieve(header);
     }
-
+
     static long getPreferredBlockSize(long header) {
-      return header & MAX_BLOCK_SIZE;
+      return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
     }
 
-    static long combinePreferredBlockSize(long header, long blockSize) {
-      if (blockSize < 0) {
-        throw new IllegalArgumentException("Block size < 0: " + blockSize);
-      } else if (blockSize > MAX_BLOCK_SIZE) {
-        throw new IllegalArgumentException("Block size = " + blockSize
-            + " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
-      }
-      return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
+    static long toLong(long preferredBlockSize, short replication) {
+      long h = 0;
+      h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
+      h = REPLICATION.BITS.combine(replication, h);
+      return h;
     }
   }
 
@@ -113,8 +106,7 @@ static long combinePreferredBlockSize(long header, long blockSize) {
       long atime, BlockInfo[] blklist, short replication,
       long preferredBlockSize) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.combineReplication(header, replication);
-    header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
+    header = HeaderFormat.toLong(preferredBlockSize, replication);
    this.blocks = blklist;
   }
 
@@ -347,7 +339,7 @@ public short getBlockReplication() {
 
   /** Set the replication factor of this file. */
   public final void setFileReplication(short replication) {
-    header = HeaderFormat.combineReplication(header, replication);
+    header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
   /** Set the replication factor of this file. */
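
For context, the refactored HeaderFormat packs two fields into the 64-bit header: the low 48 bits hold the preferred block size and the high 16 bits hold the replication factor, both declared with a minimum value of 1. The following is a minimal standalone sketch of that layout; the class name HeaderFormatSketch and the explicit mask constants are illustrative only, since the committed code delegates this arithmetic to LongBitFormat.

// Illustrative sketch of the 48/16 header split, not the Hadoop class itself.
public class HeaderFormatSketch {
  static final int BLOCK_SIZE_BITS = 48;
  static final long BLOCK_SIZE_MASK = -1L >>> (64 - BLOCK_SIZE_BITS); // low 48 bits
  static final long REPLICATION_MASK = ~BLOCK_SIZE_MASK;              // high 16 bits

  static long toLong(long preferredBlockSize, short replication) {
    if (preferredBlockSize < 1 || preferredBlockSize > BLOCK_SIZE_MASK) {
      throw new IllegalArgumentException("preferredBlockSize out of range");
    }
    if (replication < 1) {
      throw new IllegalArgumentException("replication out of range");
    }
    return ((long) replication << BLOCK_SIZE_BITS) | preferredBlockSize;
  }

  static short getReplication(long header) {
    return (short) ((header & REPLICATION_MASK) >>> BLOCK_SIZE_BITS);
  }

  static long getPreferredBlockSize(long header) {
    return header & BLOCK_SIZE_MASK;
  }

  public static void main(String[] args) {
    long header = toLong(128L * 1024 * 1024, (short) 3); // 128 MB blocks, replication 3
    System.out.println(getReplication(header));          // 3
    System.out.println(getPreferredBlockSize(header));   // 134217728
  }
}

Declaring the fields as chained LongBitFormat instances removes the duplicated mask constants and centralizes the range checks that the old combineReplication/combinePreferredBlockSize methods performed by hand.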

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
Lines changed: 1 addition & 3 deletions

@@ -48,9 +48,7 @@ public SnapshotCopy(byte[] name, PermissionStatus permissions,
         short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
       super(name, permissions, aclFeature, modificationTime, accessTime,
           xAttrsFeature);
-
-      final long h = HeaderFormat.combineReplication(0L, replication);
-      header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);
+      header = HeaderFormat.toLong(preferredBlockSize, replication);
     }
 
     public SnapshotCopy(INodeFile file) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
Lines changed: 26 additions & 29 deletions

@@ -21,9 +21,8 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -36,39 +35,41 @@
 public abstract class INodeWithAdditionalFields extends INode
     implements LinkedElement {
   static enum PermissionStatusFormat {
-    MODE(0, 16),
-    GROUP(MODE.OFFSET + MODE.LENGTH, 25),
-    USER(GROUP.OFFSET + GROUP.LENGTH, 23);
-
-    final int OFFSET;
-    final int LENGTH; //bit length
-    final long MASK;
-
-    PermissionStatusFormat(int offset, int length) {
-      OFFSET = offset;
-      LENGTH = length;
-      MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET;
+    MODE(null, 16),
+    GROUP(MODE.BITS, 25),
+    USER(GROUP.BITS, 23);
+
+    final LongBitFormat BITS;
+
+    private PermissionStatusFormat(LongBitFormat previous, int length) {
+      BITS = new LongBitFormat(name(), previous, length, 0);
     }
 
-    long retrieve(long record) {
-      return (record & MASK) >>> OFFSET;
+    static String getUser(long permission) {
+      final int n = (int)USER.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getUser(n);
     }
 
-    long combine(long bits, long record) {
-      return (record & ~MASK) | (bits << OFFSET);
+    static String getGroup(long permission) {
+      final int n = (int)GROUP.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getGroup(n);
+    }
+
+    static short getMode(long permission) {
+      return (short)MODE.BITS.retrieve(permission);
     }
 
     /** Encode the {@link PermissionStatus} to a long. */
     static long toLong(PermissionStatus ps) {
       long permission = 0L;
       final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
           ps.getUserName());
-      permission = USER.combine(user, permission);
+      permission = USER.BITS.combine(user, permission);
       final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
           ps.getGroupName());
-      permission = GROUP.combine(group, permission);
+      permission = GROUP.BITS.combine(group, permission);
       final int mode = ps.getPermission().toShort();
-      permission = MODE.combine(mode, permission);
+      permission = MODE.BITS.combine(mode, permission);
       return permission;
     }
   }
@@ -162,17 +163,15 @@ final PermissionStatus getPermissionStatus(int snapshotId) {
   }
 
   private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
-    this.permission = f.combine(n, permission);
+    this.permission = f.BITS.combine(n, permission);
   }
 
   @Override
   final String getUserName(int snapshotId) {
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getUserName();
     }
-
-    int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getUser(n);
+    return PermissionStatusFormat.getUser(permission);
   }
 
   @Override
@@ -186,9 +185,7 @@ final String getGroupName(int snapshotId) {
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getGroupName();
     }
-
-    int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getGroup(n);
+    return PermissionStatusFormat.getGroup(permission);
   }
 
   @Override
@@ -208,7 +205,7 @@ final FsPermission getFsPermission(int snapshotId) {
 
   @Override
   public final short getFsPermissionShort() {
-    return (short)PermissionStatusFormat.MODE.retrieve(permission);
+    return PermissionStatusFormat.getMode(permission);
   }
   @Override
   void setPermission(FsPermission permission) {
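
For reference, the permission word laid out by this enum is: MODE in bits 0-15, the group serial number in bits 16-40, and the user serial number in bits 41-63. Below is a hedged standalone sketch of that layout; the class PermissionWordSketch and its encode/field helpers are illustrative, since in the committed code the offsets come from chained LongBitFormat instances and the serial numbers are resolved through SerialNumberManager.

// Illustrative sketch of the permission-word layout; serial-number-to-name
// lookups are omitted and replaced by plain ints.
public class PermissionWordSketch {
  static final int MODE_LEN = 16, GROUP_LEN = 25, USER_LEN = 23;
  static final int MODE_OFFSET = 0;
  static final int GROUP_OFFSET = MODE_OFFSET + MODE_LEN;  // 16
  static final int USER_OFFSET = GROUP_OFFSET + GROUP_LEN; // 41

  // Extract a field of the given length at the given offset.
  static long field(long record, int offset, int len) {
    long max = -1L >>> (64 - len);
    return (record >>> offset) & max;
  }

  static long encode(int userSerial, int groupSerial, short mode) {
    return ((long) userSerial << USER_OFFSET)
        | ((long) groupSerial << GROUP_OFFSET)
        | (mode & 0xFFFFL);
  }

  public static void main(String[] args) {
    long permission = encode(7, 3, (short) 0755);
    System.out.println(field(permission, USER_OFFSET, USER_LEN));           // 7
    System.out.println(field(permission, GROUP_OFFSET, GROUP_LEN));         // 3
    System.out.println(Long.toOctalString(
        field(permission, MODE_OFFSET, MODE_LEN)));                         // 755
  }
}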

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
Lines changed: 67 additions & 0 deletions

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.Serializable;
+
+
+/**
+ * Bit format in a long.
+ */
+public class LongBitFormat implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  private final String NAME;
+  /** Bit offset */
+  private final int OFFSET;
+  /** Bit length */
+  private final int LENGTH;
+  /** Minimum value */
+  private final long MIN;
+  /** Maximum value */
+  private final long MAX;
+  /** Bit mask */
+  private final long MASK;
+
+  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
+    NAME = name;
+    OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
+    LENGTH = length;
+    MIN = min;
+    MAX = ((-1L) >>> (64 - LENGTH));
+    MASK = MAX << OFFSET;
+  }
+
+  /** Retrieve the value from the record. */
+  public long retrieve(long record) {
+    return (record & MASK) >>> OFFSET;
+  }
+
+  /** Combine the value to the record. */
+  public long combine(long value, long record) {
+    if (value < MIN) {
+      throw new IllegalArgumentException(
+          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
+    }
+    if (value > MAX) {
+      throw new IllegalArgumentException(
+          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
+    }
+    return (record & ~MASK) | (value << OFFSET);
+  }
+}
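
A short usage sketch of the new utility, assuming the LongBitFormat class above is available on the classpath; the demo class and field names are illustrative. Fields are chained so each one starts where the previous one ends, mirroring how HeaderFormat and PermissionStatusFormat declare their enum constants.

import org.apache.hadoop.hdfs.util.LongBitFormat;

public class LongBitFormatDemo {
  // 48 bits for block size (min 1), followed by 16 bits for replication (min 1).
  static final LongBitFormat BLOCK_SIZE = new LongBitFormat("BLOCK_SIZE", null, 48, 1);
  static final LongBitFormat REPLICATION = new LongBitFormat("REPLICATION", BLOCK_SIZE, 16, 1);

  public static void main(String[] args) {
    long record = 0;
    record = BLOCK_SIZE.combine(128L * 1024 * 1024, record);  // 128 MB
    record = REPLICATION.combine(3, record);

    System.out.println(BLOCK_SIZE.retrieve(record));   // 134217728
    System.out.println(REPLICATION.retrieve(record));  // 3

    // Values below the declared minimum are rejected:
    try {
      REPLICATION.combine(0, record);
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}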

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ public class TestINodeFile {
   private final PermissionStatus perm = new PermissionStatus(
       "userName", null, FsPermission.getDefault());
   private short replication;
-  private long preferredBlockSize;
+  private long preferredBlockSize = 1024;
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
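
The test default moves from 0 to 1024 because the new PREFERRED_BLOCK_SIZE field declares a minimum of 1, so packing a zero block size into the header now throws. A hedged JUnit 4 sketch of that behavior follows; the test class and method names are illustrative and not part of the commit.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import org.apache.hadoop.hdfs.util.LongBitFormat;
import org.junit.Test;

public class TestPreferredBlockSizeMinimum {
  private static final LongBitFormat PREFERRED_BLOCK_SIZE =
      new LongBitFormat("PREFERRED_BLOCK_SIZE", null, 48, 1);

  @Test
  public void rejectsZeroBlockSize() {
    // 1024 (the new test default) is accepted...
    long header = PREFERRED_BLOCK_SIZE.combine(1024, 0L);
    assertEquals(1024, PREFERRED_BLOCK_SIZE.retrieve(header));

    // ...while 0 (the old implicit default) is now below the declared minimum.
    try {
      PREFERRED_BLOCK_SIZE.combine(0, 0L);
      fail("expected IllegalArgumentException for value below MIN");
    } catch (IllegalArgumentException expected) {
    }
  }
}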
