/*******************************************************************************
 * Copyright (c) 2014 Jeff Martin.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the GNU Public License v3.0
 * which accompanies this distribution, and is available at
 * http://www.gnu.org/licenses/gpl.html
 * 
 * Contributors:
 *     Jeff Martin - initial API and implementation
 ******************************************************************************/
package cuchaz.enigma.analysis;

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.strobel.decompiler.languages.Region;
import com.strobel.decompiler.languages.java.ast.AstNode;

import cuchaz.enigma.mapping.Entry;

/**
 * Index of a single decompiled source file: maps character ranges (tokens) in
 * the source text to their corresponding {@link Entry} instances, remembers
 * which token is the declaration of each entry, and converts the decompiler's
 * 1-based line/column positions into character offsets.
 */
public class SourceIndex
{
	private String m_source;
	private TreeMap<Token,Entry> m_tokens;
	private Map<Entry,Token> m_declarations;
	private List<Integer> m_lineOffsets;
	
	public SourceIndex( String source )
	{
		m_source = source;
		m_tokens = Maps.newTreeMap();
		m_declarations = Maps.newHashMap();
		m_lineOffsets = Lists.newArrayList();
		
		// record the character offset at which each line starts
		m_lineOffsets.add( 0 );
		for( int i=0; i<source.length(); i++ )
		{
			if( source.charAt( i ) == '\n' )
			{
				m_lineOffsets.add( i + 1 );
			}
		}
	}
	
	public String getSource( )
	{
		return m_source;
	}
	
	public Token getToken( AstNode node )
	{
		// get a token for this node's region
		Region region = node.getRegion();
		if( region.getBeginLine() == 0 || region.getEndLine() == 0 )
		{
			throw new IllegalArgumentException( "Invalid region: " + region );
		}
		Token token = new Token(
			toPos( region.getBeginLine(), region.getBeginColumn() ),
			toPos( region.getEndLine(), region.getEndColumn() )
		);
		
		// HACKHACK: sometimes node regions are off by one
		// I think this is a bug in Procyon, but it's easy to work around
		if( !Character.isJavaIdentifierStart( m_source.charAt( token.start ) ) )
		{
			token.start++;
			token.end++;
			if( !Character.isJavaIdentifierStart( m_source.charAt( token.start ) ) )
			{
				throw new IllegalArgumentException( "Region " + region + " does not describe valid token: '" + m_source.substring( token.start, token.end ) + "'" );
			}
		}
		
		return token;
	}
	
	public void add( AstNode node, Entry entry )
	{
		m_tokens.put( getToken( node ), entry );
	}
	
	public void addDeclaration( AstNode node, Entry entry )
	{
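		// index the token, and also remember it as the declaration site of this entry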
		Token token = getToken( node );
		m_tokens.put( token, entry );
		m_declarations.put( entry, token );
	}
	
	public Token getToken( int pos )
	{
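		// tokens are ordered by position, so the nearest token starting at or
		// before pos is the only one that could contain it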
		Map.Entry<Token,Entry> mapEntry = m_tokens.floorEntry( new Token( pos, pos ) );
		if( mapEntry == null )
		{
			return null;
		}
		Token token = mapEntry.getKey();
		if( token.contains( pos ) )
		{
			return token;
		}
		return null;
	}
	
	public Entry getEntry( Token token )
	{
		if( token == null )
		{
			return null;
		}
		return m_tokens.get( token );
	}
	
	public Iterable<Token> tokens( )
	{
		return m_tokens.keySet();
	}
	
	public Token getDeclarationToken( Entry entry )
	{
		return m_declarations.get( entry );
	}
	
	private int toPos( int line, int col )
	{
		// line and col are 1-based
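		// e.g., line 1, column 1 maps to offset 0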
		return m_lineOffsets.get( line - 1 ) + col - 1;
	}
}
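
/*
 * Usage sketch (hypothetical names; the AstNode and Entry values below would
 * normally come from walking the Procyon AST produced during decompilation):
 *
 *   SourceIndex index = new SourceIndex( sourceText );
 *   index.addDeclaration( classNameNode, classEntry );   // declaration token
 *   index.add( referenceNode, referencedEntry );         // reference token
 *
 *   Token token = index.getToken( caretPos );            // token under a caret position
 *   Entry entry = index.getEntry( token );               // entry that token refers to
 *   Token decl = index.getDeclarationToken( entry );     // where that entry is declared
 */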