dashboard, p2p, vendor: visualize peers (#19247)
* dashboard, p2p: visualize peers * dashboard: change scale to green to red
This commit is contained in:
parent
1591b63306
commit
1a29bf0ee2
|
@ -42,6 +42,7 @@ profile.cov
|
|||
/dashboard/assets/node_modules
|
||||
/dashboard/assets/stats.json
|
||||
/dashboard/assets/bundle.js
|
||||
/dashboard/assets/bundle.js.map
|
||||
/dashboard/assets/package-lock.json
|
||||
|
||||
**/yarn-error.log
|
||||
|
|
79308
dashboard/assets.go
79308
dashboard/assets.go
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,3 @@
|
|||
node_modules/* #ignored by default
|
||||
flow-typed/*
|
||||
bundle.js
|
|
@ -16,71 +16,66 @@
|
|||
|
||||
// React syntax style mostly according to https://github.com/airbnb/javascript/tree/master/react
|
||||
{
|
||||
'env': {
|
||||
'browser': true,
|
||||
'node': true,
|
||||
'es6': true,
|
||||
"env": {
|
||||
"browser": true,
|
||||
"node": true,
|
||||
"es6": true
|
||||
},
|
||||
'parser': 'babel-eslint',
|
||||
'parserOptions': {
|
||||
'sourceType': 'module',
|
||||
'ecmaVersion': 6,
|
||||
'ecmaFeatures': {
|
||||
'jsx': true,
|
||||
"parser": "babel-eslint",
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": 6,
|
||||
"ecmaFeatures": {
|
||||
"jsx": true
|
||||
}
|
||||
},
|
||||
'extends': 'airbnb',
|
||||
'plugins': [
|
||||
'flowtype',
|
||||
'react',
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"airbnb",
|
||||
"plugin:flowtype/recommended",
|
||||
"plugin:react/recommended"
|
||||
],
|
||||
'rules': {
|
||||
'no-tabs': 'off',
|
||||
'indent': ['error', 'tab'],
|
||||
'react/jsx-indent': ['error', 'tab'],
|
||||
'react/jsx-indent-props': ['error', 'tab'],
|
||||
'react/prefer-stateless-function': 'off',
|
||||
'jsx-quotes': ['error', 'prefer-single'],
|
||||
'no-plusplus': 'off',
|
||||
'no-console': ['error', { allow: ['error'] }],
|
||||
|
||||
"plugins": [
|
||||
"flowtype",
|
||||
"react"
|
||||
],
|
||||
"rules": {
|
||||
"no-tabs": "off",
|
||||
"indent": ["error", "tab"],
|
||||
"react/jsx-indent": ["error", "tab"],
|
||||
"react/jsx-indent-props": ["error", "tab"],
|
||||
"react/prefer-stateless-function": "off",
|
||||
"react/destructuring-assignment": ["error", "always", {"ignoreClassFields": true}],
|
||||
"jsx-quotes": ["error", "prefer-single"],
|
||||
"no-plusplus": "off",
|
||||
"no-console": ["error", { "allow": ["error"] }],
|
||||
// Specifies the maximum length of a line.
|
||||
'max-len': ['warn', 120, 2, {
|
||||
'ignoreUrls': true,
|
||||
'ignoreComments': false,
|
||||
'ignoreRegExpLiterals': true,
|
||||
'ignoreStrings': true,
|
||||
'ignoreTemplateLiterals': true,
|
||||
"max-len": ["warn", 120, 2, {
|
||||
"ignoreUrls": true,
|
||||
"ignoreComments": false,
|
||||
"ignoreRegExpLiterals": true,
|
||||
"ignoreStrings": true,
|
||||
"ignoreTemplateLiterals": true
|
||||
}],
|
||||
// Enforces consistent spacing between keys and values in object literal properties.
|
||||
'key-spacing': ['error', {'align': {
|
||||
'beforeColon': false,
|
||||
'afterColon': true,
|
||||
'on': 'value'
|
||||
"key-spacing": ["error", {"align": {
|
||||
"beforeColon": false,
|
||||
"afterColon": true,
|
||||
"on": "value"
|
||||
}}],
|
||||
// Prohibits padding inside curly braces.
|
||||
'object-curly-spacing': ['error', 'never'],
|
||||
'no-use-before-define': 'off', // messageAPI
|
||||
'default-case': 'off',
|
||||
|
||||
'flowtype/boolean-style': ['error', 'boolean'],
|
||||
'flowtype/define-flow-type': 'warn',
|
||||
'flowtype/generic-spacing': ['error', 'never'],
|
||||
'flowtype/no-primitive-constructor-types': 'error',
|
||||
'flowtype/no-weak-types': 'error',
|
||||
'flowtype/object-type-delimiter': ['error', 'comma'],
|
||||
'flowtype/require-valid-file-annotation': 'error',
|
||||
'flowtype/semi': ['error', 'always'],
|
||||
'flowtype/space-after-type-colon': ['error', 'always'],
|
||||
'flowtype/space-before-generic-bracket': ['error', 'never'],
|
||||
'flowtype/space-before-type-colon': ['error', 'never'],
|
||||
'flowtype/union-intersection-spacing': ['error', 'always'],
|
||||
'flowtype/use-flow-type': 'warn',
|
||||
'flowtype/valid-syntax': 'warn',
|
||||
"object-curly-spacing": ["error", "never"],
|
||||
"no-use-before-define": "off", // message types
|
||||
"default-case": "off"
|
||||
},
|
||||
'settings': {
|
||||
'flowtype': {
|
||||
'onlyFilesWithFlowAnnotation': true,
|
||||
"settings": {
|
||||
"import/resolver": {
|
||||
"node": {
|
||||
"paths": ["components"] // import './components/Component' -> import 'Component'
|
||||
}
|
||||
},
|
||||
"flowtype": {
|
||||
"onlyFilesWithFlowAnnotation": true
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,3 +7,5 @@ node_modules/jss/flow-typed
|
|||
|
||||
[options]
|
||||
include_warnings=true
|
||||
module.system.node.resolve_dirname=node_modules
|
||||
module.system.node.resolve_dirname=components
|
||||
|
|
|
@ -16,43 +16,46 @@
|
|||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import {faHome, faLink, faGlobeEurope, faTachometerAlt, faList} from '@fortawesome/free-solid-svg-icons';
|
||||
import {faCreditCard} from '@fortawesome/free-regular-svg-icons';
|
||||
|
||||
type ProvidedMenuProp = {|title: string, icon: string|};
|
||||
const menuSkeletons: Array<{|id: string, menu: ProvidedMenuProp|}> = [
|
||||
{
|
||||
id: 'home',
|
||||
menu: {
|
||||
title: 'Home',
|
||||
icon: 'home',
|
||||
icon: faHome,
|
||||
},
|
||||
}, {
|
||||
id: 'chain',
|
||||
menu: {
|
||||
title: 'Chain',
|
||||
icon: 'link',
|
||||
icon: faLink,
|
||||
},
|
||||
}, {
|
||||
id: 'txpool',
|
||||
menu: {
|
||||
title: 'TxPool',
|
||||
icon: 'credit-card',
|
||||
icon: faCreditCard,
|
||||
},
|
||||
}, {
|
||||
id: 'network',
|
||||
menu: {
|
||||
title: 'Network',
|
||||
icon: 'globe',
|
||||
icon: faGlobeEurope,
|
||||
},
|
||||
}, {
|
||||
id: 'system',
|
||||
menu: {
|
||||
title: 'System',
|
||||
icon: 'tachometer',
|
||||
icon: faTachometerAlt,
|
||||
},
|
||||
}, {
|
||||
id: 'logs',
|
||||
menu: {
|
||||
title: 'Logs',
|
||||
icon: 'list',
|
||||
icon: faList,
|
||||
},
|
||||
},
|
||||
];
|
||||
|
@ -64,8 +67,26 @@ export const MENU: Map<string, {...MenuProp}> = new Map(menuSkeletons.map(({id,
|
|||
|
||||
export const DURATION = 200;
|
||||
|
||||
export const chartStrokeWidth = 0.2;
|
||||
|
||||
export const styles = {
|
||||
light: {
|
||||
color: 'rgba(255, 255, 255, 0.54)',
|
||||
},
|
||||
};
|
||||
|
||||
// unit contains the units for the bytePlotter.
|
||||
export const unit = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'];
|
||||
|
||||
// simplifyBytes returns the simplified version of the given value followed by the unit.
|
||||
export const simplifyBytes = (x: number) => {
|
||||
let i = 0;
|
||||
for (; x > 1024 && i < 8; i++) {
|
||||
x /= 1024;
|
||||
}
|
||||
return x.toFixed(2).toString().concat(' ', unit[i], 'B');
|
||||
};
|
||||
|
||||
// hues contains predefined colors for gradient stop colors.
|
||||
export const hues = ['#00FF00', '#FFFF00', '#FF7F00', '#FF0000'];
|
||||
export const hueScale = [0, 2048, 102400, 2097152];
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
import React, {Component} from 'react';
|
||||
import type {ChildrenArray} from 'react';
|
||||
|
||||
import Grid from 'material-ui/Grid';
|
||||
import Grid from '@material-ui/core/Grid';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
|
@ -33,7 +33,7 @@ const styles = {
|
|||
flex: 1,
|
||||
padding: 0,
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
children: ChildrenArray<React$Element<any>>,
|
||||
|
|
|
@ -18,8 +18,8 @@
|
|||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import Typography from 'material-ui/Typography';
|
||||
import {styles} from '../common';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import {styles, simplifyBytes} from '../common';
|
||||
|
||||
// multiplier multiplies a number by another.
|
||||
export const multiplier = <T>(by: number = 1) => (x: number) => x * by;
|
||||
|
@ -37,18 +37,6 @@ export const percentPlotter = <T>(text: string, mapper: (T => T) = multiplier(1)
|
|||
);
|
||||
};
|
||||
|
||||
// unit contains the units for the bytePlotter.
|
||||
const unit = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'];
|
||||
|
||||
// simplifyBytes returns the simplified version of the given value followed by the unit.
|
||||
const simplifyBytes = (x: number) => {
|
||||
let i = 0;
|
||||
for (; x > 1024 && i < 8; i++) {
|
||||
x /= 1024;
|
||||
}
|
||||
return x.toFixed(2).toString().concat(' ', unit[i], 'B');
|
||||
};
|
||||
|
||||
// bytePlotter renders a tooltip, which displays the payload as a byte value.
|
||||
export const bytePlotter = <T>(text: string, mapper: (T => T) = multiplier(1)) => (payload: T) => {
|
||||
const p = mapper(payload);
|
||||
|
@ -70,7 +58,8 @@ export const bytePerSecPlotter = <T>(text: string, mapper: (T => T) = multiplier
|
|||
}
|
||||
return (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={styles.light}>{text}</span> {simplifyBytes(p)}/s
|
||||
<span style={styles.light}>{text}</span>
|
||||
{simplifyBytes(p)}/s
|
||||
</Typography>
|
||||
);
|
||||
};
|
||||
|
|
|
@ -17,14 +17,16 @@
|
|||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
import {hot} from 'react-hot-loader';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
|
||||
import Header from './Header';
|
||||
import Body from './Body';
|
||||
import Header from 'Header';
|
||||
import Body from 'Body';
|
||||
import {inserter as logInserter, SAME} from 'Logs';
|
||||
import {inserter as peerInserter} from 'Network';
|
||||
import {MENU} from '../common';
|
||||
import type {Content} from '../types/content';
|
||||
import {inserter as logInserter} from './Logs';
|
||||
|
||||
// deepUpdate updates an object corresponding to the given update data, which has
|
||||
// the shape of the same structure as the original object. updater also has the same
|
||||
|
@ -37,7 +39,6 @@ import {inserter as logInserter} from './Logs';
|
|||
// of the update.
|
||||
const deepUpdate = (updater: Object, update: Object, prev: Object): $Shape<Content> => {
|
||||
if (typeof update === 'undefined') {
|
||||
// TODO (kurkomisi): originally this was deep copy, investigate it.
|
||||
return prev;
|
||||
}
|
||||
if (typeof updater === 'function') {
|
||||
|
@ -88,8 +89,13 @@ const defaultContent: () => Content = () => ({
|
|||
home: {},
|
||||
chain: {},
|
||||
txpool: {},
|
||||
network: {},
|
||||
system: {
|
||||
network: {
|
||||
peers: {
|
||||
bundles: {},
|
||||
},
|
||||
diff: [],
|
||||
},
|
||||
system: {
|
||||
activeMemory: [],
|
||||
virtualMemory: [],
|
||||
networkIngress: [],
|
||||
|
@ -103,8 +109,8 @@ const defaultContent: () => Content = () => ({
|
|||
chunks: [],
|
||||
endTop: false,
|
||||
endBottom: true,
|
||||
topChanged: 0,
|
||||
bottomChanged: 0,
|
||||
topChanged: SAME,
|
||||
bottomChanged: SAME,
|
||||
},
|
||||
});
|
||||
|
||||
|
@ -119,7 +125,7 @@ const updaters = {
|
|||
home: null,
|
||||
chain: null,
|
||||
txpool: null,
|
||||
network: null,
|
||||
network: peerInserter(200),
|
||||
system: {
|
||||
activeMemory: appender(200),
|
||||
virtualMemory: appender(200),
|
||||
|
@ -186,8 +192,8 @@ class Dashboard extends Component<Props, State> {
|
|||
// reconnect establishes a websocket connection with the server, listens for incoming messages
|
||||
// and tries to reconnect on connection loss.
|
||||
reconnect = () => {
|
||||
// PROD is defined by webpack.
|
||||
const server = new WebSocket(`${((window.location.protocol === 'https:') ? 'wss://' : 'ws://')}${PROD ? window.location.host : 'localhost:8080'}/api`);
|
||||
const host = process.env.NODE_ENV === 'production' ? window.location.host : 'localhost:8080';
|
||||
const server = new WebSocket(`${((window.location.protocol === 'https:') ? 'wss://' : 'ws://')}${host}/api`);
|
||||
server.onopen = () => {
|
||||
this.setState({content: defaultContent(), shouldUpdate: {}, server});
|
||||
};
|
||||
|
@ -249,4 +255,4 @@ class Dashboard extends Component<Props, State> {
|
|||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(Dashboard);
|
||||
export default hot(module)(withStyles(themeStyles)(Dashboard));
|
||||
|
|
|
@ -18,14 +18,19 @@
|
|||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import Typography from 'material-ui/Typography';
|
||||
import Grid from 'material-ui/Grid';
|
||||
import {ResponsiveContainer, AreaChart, Area, Tooltip} from 'recharts';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import Grid from '@material-ui/core/Grid';
|
||||
import ResponsiveContainer from 'recharts/es6/component/ResponsiveContainer';
|
||||
import AreaChart from 'recharts/es6/chart/AreaChart';
|
||||
import Area from 'recharts/es6/cartesian/Area';
|
||||
import ReferenceLine from 'recharts/es6/cartesian/ReferenceLine';
|
||||
import Label from 'recharts/es6/component/Label';
|
||||
import Tooltip from 'recharts/es6/component/Tooltip';
|
||||
|
||||
import ChartRow from './ChartRow';
|
||||
import CustomTooltip, {bytePlotter, bytePerSecPlotter, percentPlotter, multiplier} from './CustomTooltip';
|
||||
import {styles as commonStyles} from '../common';
|
||||
import ChartRow from 'ChartRow';
|
||||
import CustomTooltip, {bytePlotter, bytePerSecPlotter, percentPlotter, multiplier} from 'CustomTooltip';
|
||||
import {chartStrokeWidth, styles as commonStyles} from '../common';
|
||||
import type {General, System} from '../types/content';
|
||||
|
||||
const FOOTER_SYNC_ID = 'footerSyncId';
|
||||
|
@ -38,6 +43,15 @@ const TRAFFIC = 'traffic';
|
|||
const TOP = 'Top';
|
||||
const BOTTOM = 'Bottom';
|
||||
|
||||
const cpuLabelTop = 'Process load';
|
||||
const cpuLabelBottom = 'System load';
|
||||
const memoryLabelTop = 'Active memory';
|
||||
const memoryLabelBottom = 'Virtual memory';
|
||||
const diskLabelTop = 'Disk read';
|
||||
const diskLabelBottom = 'Disk write';
|
||||
const trafficLabelTop = 'Download';
|
||||
const trafficLabelBottom = 'Upload';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
footer: {
|
||||
|
@ -53,6 +67,10 @@ const styles = {
|
|||
height: '100%',
|
||||
width: '99%',
|
||||
},
|
||||
link: {
|
||||
color: 'inherit',
|
||||
textDecoration: 'none',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
|
@ -73,18 +91,23 @@ export type Props = {
|
|||
shouldUpdate: Object,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Footer renders the footer of the dashboard.
|
||||
class Footer extends Component<Props> {
|
||||
shouldComponentUpdate(nextProps) {
|
||||
class Footer extends Component<Props, State> {
|
||||
shouldComponentUpdate(nextProps: Readonly<Props>, nextState: Readonly<State>, nextContext: any) {
|
||||
return typeof nextProps.shouldUpdate.general !== 'undefined' || typeof nextProps.shouldUpdate.system !== 'undefined';
|
||||
}
|
||||
|
||||
// halfHeightChart renders an area chart with half of the height of its parent.
|
||||
halfHeightChart = (chartProps, tooltip, areaProps) => (
|
||||
halfHeightChart = (chartProps, tooltip, areaProps, label, position) => (
|
||||
<ResponsiveContainer width='100%' height='50%'>
|
||||
<AreaChart {...chartProps} >
|
||||
<AreaChart {...chartProps}>
|
||||
{!tooltip || (<Tooltip cursor={false} content={<CustomTooltip tooltip={tooltip} />} />)}
|
||||
<Area isAnimationActive={false} type='monotone' {...areaProps} />
|
||||
<Area isAnimationActive={false} strokeWidth={chartStrokeWidth} type='monotone' {...areaProps} />
|
||||
<ReferenceLine x={0} strokeWidth={0}>
|
||||
<Label fill={areaProps.fill} value={label} position={position} />
|
||||
</ReferenceLine>
|
||||
</AreaChart>
|
||||
</ResponsiveContainer>
|
||||
);
|
||||
|
@ -111,6 +134,8 @@ class Footer extends Component<Props> {
|
|||
},
|
||||
topChart.tooltip,
|
||||
{dataKey: topKey, stroke: topColor, fill: topColor},
|
||||
topChart.label,
|
||||
'insideBottomLeft',
|
||||
)}
|
||||
{this.halfHeightChart(
|
||||
{
|
||||
|
@ -120,6 +145,8 @@ class Footer extends Component<Props> {
|
|||
},
|
||||
bottomChart.tooltip,
|
||||
{dataKey: bottomKey, stroke: bottomColor, fill: bottomColor},
|
||||
bottomChart.label,
|
||||
'insideTopLeft',
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
|
@ -135,37 +162,42 @@ class Footer extends Component<Props> {
|
|||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
CPU,
|
||||
{data: system.processCPU, tooltip: percentPlotter('Process load')},
|
||||
{data: system.systemCPU, tooltip: percentPlotter('System load', multiplier(-1))},
|
||||
{data: system.processCPU, tooltip: percentPlotter(cpuLabelTop), label: cpuLabelTop},
|
||||
{data: system.systemCPU, tooltip: percentPlotter(cpuLabelBottom, multiplier(-1)), label: cpuLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
MEMORY,
|
||||
{data: system.activeMemory, tooltip: bytePlotter('Active memory')},
|
||||
{data: system.virtualMemory, tooltip: bytePlotter('Virtual memory', multiplier(-1))},
|
||||
{data: system.activeMemory, tooltip: bytePlotter(memoryLabelTop), label: memoryLabelTop},
|
||||
{data: system.virtualMemory, tooltip: bytePlotter(memoryLabelBottom, multiplier(-1)), label: memoryLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
DISK,
|
||||
{data: system.diskRead, tooltip: bytePerSecPlotter('Disk read')},
|
||||
{data: system.diskWrite, tooltip: bytePerSecPlotter('Disk write', multiplier(-1))},
|
||||
{data: system.diskRead, tooltip: bytePerSecPlotter(diskLabelTop), label: diskLabelTop},
|
||||
{data: system.diskWrite, tooltip: bytePerSecPlotter(diskLabelBottom, multiplier(-1)), label: diskLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
TRAFFIC,
|
||||
{data: system.networkIngress, tooltip: bytePerSecPlotter('Download')},
|
||||
{data: system.networkEgress, tooltip: bytePerSecPlotter('Upload', multiplier(-1))},
|
||||
{data: system.networkIngress, tooltip: bytePerSecPlotter(trafficLabelTop), label: trafficLabelTop},
|
||||
{data: system.networkEgress, tooltip: bytePerSecPlotter(trafficLabelBottom, multiplier(-1)), label: trafficLabelBottom},
|
||||
)}
|
||||
</ChartRow>
|
||||
</Grid>
|
||||
<Grid item >
|
||||
<Grid item>
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={commonStyles.light}>Geth</span> {general.version}
|
||||
</Typography>
|
||||
{general.commit && (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={commonStyles.light}>{'Commit '}</span>
|
||||
<a href={`https://github.com/ethereum/go-ethereum/commit/${general.commit}`} target='_blank' style={{color: 'inherit', textDecoration: 'none'}} >
|
||||
<a
|
||||
href={`https://github.com/ethereum/go-ethereum/commit/${general.commit}`}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
style={styles.link}
|
||||
>
|
||||
{general.commit.substring(0, 8)}
|
||||
</a>
|
||||
</Typography>
|
||||
|
|
|
@ -18,13 +18,13 @@
|
|||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import AppBar from 'material-ui/AppBar';
|
||||
import Toolbar from 'material-ui/Toolbar';
|
||||
import IconButton from 'material-ui/IconButton';
|
||||
import Icon from 'material-ui/Icon';
|
||||
import MenuIcon from 'material-ui-icons/Menu';
|
||||
import Typography from 'material-ui/Typography';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import AppBar from '@material-ui/core/AppBar';
|
||||
import Toolbar from '@material-ui/core/Toolbar';
|
||||
import IconButton from '@material-ui/core/IconButton';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
import {faBars} from '@fortawesome/free-solid-svg-icons';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
|
@ -67,9 +67,7 @@ class Header extends Component<Props> {
|
|||
<AppBar position='static' className={classes.header} style={styles.header}>
|
||||
<Toolbar className={classes.toolbar} style={styles.toolbar}>
|
||||
<IconButton onClick={this.props.switchSideBar}>
|
||||
<Icon>
|
||||
<MenuIcon />
|
||||
</Icon>
|
||||
<FontAwesomeIcon icon={faBars} />
|
||||
</IconButton>
|
||||
<Typography type='title' color='inherit' noWrap className={classes.title}>
|
||||
Go Ethereum Dashboard
|
||||
|
|
|
@ -18,7 +18,8 @@
|
|||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import List, {ListItem} from 'material-ui/List';
|
||||
import List from '@material-ui/core/List';
|
||||
import ListItem from '@material-ui/core/ListItem';
|
||||
import escapeHtml from 'escape-html';
|
||||
import type {Record, Content, LogsMessage, Logs as LogsType} from '../types/content';
|
||||
|
||||
|
@ -104,9 +105,9 @@ const createChunk = (records: Array<Record>) => {
|
|||
|
||||
// ADDED, SAME and REMOVED are used to track the change of the log chunk array.
|
||||
// The scroll position is set using these values.
|
||||
const ADDED = 1;
|
||||
const SAME = 0;
|
||||
const REMOVED = -1;
|
||||
export const ADDED = 1;
|
||||
export const SAME = 0;
|
||||
export const REMOVED = -1;
|
||||
|
||||
// inserter is a state updater function for the main component, which inserts the new log chunk into the chunk array.
|
||||
// limit is the maximum length of the chunk array, used in order to prevent the browser from OOM.
|
||||
|
@ -166,7 +167,7 @@ export const inserter = (limit: number) => (update: LogsMessage, prev: LogsType)
|
|||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
logListItem: {
|
||||
padding: 0,
|
||||
padding: 0,
|
||||
lineHeight: 1.231,
|
||||
},
|
||||
logChunk: {
|
||||
|
@ -251,15 +252,15 @@ class Logs extends Component<Props, State> {
|
|||
// atBottom checks if the scroll position it at the bottom of the container.
|
||||
atBottom = () => {
|
||||
const {container} = this.props;
|
||||
return container.scrollHeight - container.scrollTop <=
|
||||
container.clientHeight + container.scrollHeight * requestBand;
|
||||
return container.scrollHeight - container.scrollTop
|
||||
<= container.clientHeight + container.scrollHeight * requestBand;
|
||||
};
|
||||
|
||||
// beforeUpdate is called by the parent component, saves the previous scroll position
|
||||
// and the height of the first log chunk, which can be deleted during the insertion.
|
||||
beforeUpdate = () => {
|
||||
let firstHeight = 0;
|
||||
let chunkList = this.content.children[1];
|
||||
const chunkList = this.content.children[1];
|
||||
if (chunkList && chunkList.children[0]) {
|
||||
firstHeight = chunkList.children[0].clientHeight;
|
||||
}
|
||||
|
|
|
@ -18,11 +18,12 @@
|
|||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
|
||||
import Network from 'Network';
|
||||
import Logs from 'Logs';
|
||||
import Footer from 'Footer';
|
||||
import {MENU} from '../common';
|
||||
import Logs from './Logs';
|
||||
import Footer from './Footer';
|
||||
import type {Content} from '../types/content';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
|
@ -33,7 +34,7 @@ const styles = {
|
|||
width: '100%',
|
||||
},
|
||||
content: {
|
||||
flex: 1,
|
||||
flex: 1,
|
||||
overflow: 'auto',
|
||||
},
|
||||
};
|
||||
|
@ -54,21 +55,16 @@ export type Props = {
|
|||
send: string => void,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Main renders the chosen content.
|
||||
class Main extends Component<Props> {
|
||||
class Main extends Component<Props, State> {
|
||||
constructor(props) {
|
||||
super(props);
|
||||
this.container = React.createRef();
|
||||
this.content = React.createRef();
|
||||
}
|
||||
|
||||
getSnapshotBeforeUpdate() {
|
||||
if (this.content && typeof this.content.beforeUpdate === 'function') {
|
||||
return this.content.beforeUpdate();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
componentDidUpdate(prevProps, prevState, snapshot) {
|
||||
if (this.content && typeof this.content.didUpdate === 'function') {
|
||||
this.content.didUpdate(prevProps, prevState, snapshot);
|
||||
|
@ -81,6 +77,13 @@ class Main extends Component<Props> {
|
|||
}
|
||||
};
|
||||
|
||||
getSnapshotBeforeUpdate(prevProps: Readonly<P>, prevState: Readonly<S>) {
|
||||
if (this.content && typeof this.content.beforeUpdate === 'function') {
|
||||
return this.content.beforeUpdate();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
render() {
|
||||
const {
|
||||
classes, active, content, shouldUpdate,
|
||||
|
@ -89,9 +92,20 @@ class Main extends Component<Props> {
|
|||
let children = null;
|
||||
switch (active) {
|
||||
case MENU.get('home').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('chain').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('txpool').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('network').id:
|
||||
children = <Network
|
||||
content={this.props.content.network}
|
||||
container={this.container}
|
||||
/>;
|
||||
break;
|
||||
case MENU.get('system').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
|
|
|
@ -0,0 +1,529 @@
|
|||
// @flow
|
||||
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import Table from '@material-ui/core/Table';
|
||||
import TableHead from '@material-ui/core/TableHead';
|
||||
import TableBody from '@material-ui/core/TableBody';
|
||||
import TableRow from '@material-ui/core/TableRow';
|
||||
import TableCell from '@material-ui/core/TableCell';
|
||||
import Grid from '@material-ui/core/Grid/Grid';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import {AreaChart, Area, Tooltip, YAxis} from 'recharts';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
import {faCircle as fasCircle} from '@fortawesome/free-solid-svg-icons';
|
||||
import {faCircle as farCircle} from '@fortawesome/free-regular-svg-icons';
|
||||
import convert from 'color-convert';
|
||||
|
||||
import CustomTooltip, {bytePlotter, multiplier} from 'CustomTooltip';
|
||||
import type {Network as NetworkType, PeerEvent} from '../types/content';
|
||||
import {styles as commonStyles, chartStrokeWidth, hues, hueScale} from '../common';
|
||||
|
||||
// Peer chart dimensions.
|
||||
const trafficChartHeight = 18;
|
||||
const trafficChartWidth = 400;
|
||||
|
||||
// setMaxIngress adjusts the peer chart's gradient values based on the given value.
|
||||
const setMaxIngress = (peer, value) => {
|
||||
peer.maxIngress = value;
|
||||
peer.ingressGradient = [];
|
||||
peer.ingressGradient.push({offset: hueScale[0], color: hues[0]});
|
||||
let i = 1;
|
||||
for (; i < hues.length && value > hueScale[i]; i++) {
|
||||
peer.ingressGradient.push({offset: Math.floor(hueScale[i] * 100 / value), color: hues[i]});
|
||||
}
|
||||
i--;
|
||||
if (i < hues.length - 1) {
|
||||
// Usually the maximum value gets between two points on the predefined
|
||||
// color scale (e.g. 123KB is somewhere between 100KB (#FFFF00) and
|
||||
// 1MB (#FF0000)), and the charts need to be comparable by the colors,
|
||||
// so we have to calculate the last hue using the maximum value and the
|
||||
// surrounding hues in order to avoid the uniformity of the top colors
|
||||
// on the charts. For this reason the two hues are translated into the
|
||||
// CIELAB color space, and the top color will be their weighted average
|
||||
// (CIELAB is perceptually uniform, meaning that any point on the line
|
||||
// between two pure color points is also a pure color, so the weighted
|
||||
// average will not lose from the saturation).
|
||||
//
|
||||
// In case the maximum value is greater than the biggest predefined
|
||||
// scale value, the top of the chart will have uniform color.
|
||||
const lastHue = convert.hex.lab(hues[i]);
|
||||
const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]);
|
||||
convert.hex.lab(hues[i + 1]).forEach((val, j) => {
|
||||
lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
|
||||
});
|
||||
peer.ingressGradient.push({offset: 100, color: `#${convert.lab.hex(lastHue)}`});
|
||||
}
|
||||
};
|
||||
|
||||
// setMaxEgress adjusts the peer chart's gradient values based on the given value.
|
||||
// In case of the egress the chart is upside down, so the gradients need to be
|
||||
// calculated inversely compared to the ingress.
|
||||
const setMaxEgress = (peer, value) => {
|
||||
peer.maxEgress = value;
|
||||
peer.egressGradient = [];
|
||||
peer.egressGradient.push({offset: 100 - hueScale[0], color: hues[0]});
|
||||
let i = 1;
|
||||
for (; i < hues.length && value > hueScale[i]; i++) {
|
||||
peer.egressGradient.unshift({offset: 100 - Math.floor(hueScale[i] * 100 / value), color: hues[i]});
|
||||
}
|
||||
i--;
|
||||
if (i < hues.length - 1) {
|
||||
// Calculate the last hue.
|
||||
const lastHue = convert.hex.lab(hues[i]);
|
||||
const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]);
|
||||
convert.hex.lab(hues[i + 1]).forEach((val, j) => {
|
||||
lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
|
||||
});
|
||||
peer.egressGradient.unshift({offset: 0, color: `#${convert.lab.hex(lastHue)}`});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// setIngressChartAttributes searches for the maximum value of the ingress
|
||||
// samples, and adjusts the peer chart's gradient values accordingly.
|
||||
const setIngressChartAttributes = (peer) => {
|
||||
let max = 0;
|
||||
peer.ingress.forEach(({value}) => {
|
||||
if (value > max) {
|
||||
max = value;
|
||||
}
|
||||
});
|
||||
setMaxIngress(peer, max);
|
||||
};
|
||||
|
||||
// setEgressChartAttributes searches for the maximum value of the egress
|
||||
// samples, and adjusts the peer chart's gradient values accordingly.
|
||||
const setEgressChartAttributes = (peer) => {
|
||||
let max = 0;
|
||||
peer.egress.forEach(({value}) => {
|
||||
if (value > max) {
|
||||
max = value;
|
||||
}
|
||||
});
|
||||
setMaxEgress(peer, max);
|
||||
};
|
||||
|
||||
// inserter is a state updater function for the main component, which handles the peers.
// It mutates and returns `prev`, merging the incoming `update` into it:
// the first message replaces the whole peer history, subsequent messages carry
// incremental `diff` events (add/remove peers, traffic samples, activity changes).
export const inserter = (sampleLimit: number) => (update: NetworkType, prev: NetworkType) => {
	// The first message contains the metered peer history.
	if (update.peers && update.peers.bundles) {
		prev.peers = update.peers;
		// Initialize the chart gradients for every known peer of the history.
		Object.values(prev.peers.bundles).forEach((bundle) => {
			if (bundle.knownPeers) {
				Object.values(bundle.knownPeers).forEach((peer) => {
					if (!peer.maxIngress) {
						setIngressChartAttributes(peer);
					}
					if (!peer.maxEgress) {
						setEgressChartAttributes(peer);
					}
				});
			}
		});
	}
	if (Array.isArray(update.diff)) {
		update.diff.forEach((event: PeerEvent) => {
			if (!event.ip) {
				console.error('Peer event without IP', event);
				return;
			}
			// Removal events: each case returns early, nothing else to merge.
			switch (event.remove) {
			case 'bundle': {
				delete prev.peers.bundles[event.ip];
				return;
			}
			case 'known': {
				if (!event.id) {
					console.error('Remove known peer event without ID', event.ip);
					return;
				}
				const bundle = prev.peers.bundles[event.ip];
				if (!bundle || !bundle.knownPeers || !bundle.knownPeers[event.id]) {
					console.error('No known peer to remove', event.ip, event.id);
					return;
				}
				delete bundle.knownPeers[event.id];
				return;
			}
			case 'attempt': {
				const bundle = prev.peers.bundles[event.ip];
				if (!bundle || !Array.isArray(bundle.attempts) || bundle.attempts.length < 1) {
					console.error('No unknown peer to remove', event.ip);
					return;
				}
				// Connection attempts are removed in FIFO order.
				bundle.attempts.splice(0, 1);
				return;
			}
			}
			// Create an empty bundle on the IP's first appearance.
			if (!prev.peers.bundles[event.ip]) {
				prev.peers.bundles[event.ip] = {
					location: {
						country: '',
						city: '',
						latitude: 0,
						longitude: 0,
					},
					knownPeers: {},
					attempts: [],
				};
			}
			const bundle = prev.peers.bundles[event.ip];
			// A location event carries only geographical data.
			if (event.location) {
				bundle.location = event.location;
				return;
			}
			// Events without a node ID belong to unidentified connection attempts.
			if (!event.id) {
				if (!bundle.attempts) {
					bundle.attempts = [];
				}
				bundle.attempts.push({
					connected: event.connected,
					disconnected: event.disconnected,
				});
				return;
			}
			if (!bundle.knownPeers) {
				bundle.knownPeers = {};
			}
			// Create the peer entry on its first appearance.
			if (!bundle.knownPeers[event.id]) {
				bundle.knownPeers[event.id] = {
					connected: [],
					disconnected: [],
					ingress: [],
					egress: [],
					active: false,
				};
			}
			const peer = bundle.knownPeers[event.id];
			// Make sure the chart gradients are initialized before rendering.
			if (!peer.maxIngress) {
				setIngressChartAttributes(peer);
			}
			if (!peer.maxEgress) {
				setEgressChartAttributes(peer);
			}
			if (event.connected) {
				if (!peer.connected) {
					console.warn('peer.connected should exist');
					peer.connected = [];
				}
				peer.connected.push(event.connected);
			}
			if (event.disconnected) {
				if (!peer.disconnected) {
					console.warn('peer.disconnected should exist');
					peer.disconnected = [];
				}
				peer.disconnected.push(event.disconnected);
			}
			switch (event.activity) {
			case 'active':
				peer.active = true;
				break;
			case 'inactive':
				peer.active = false;
				break;
			}
			// Traffic samples always come in ingress/egress pairs of equal length.
			if (Array.isArray(event.ingress) && Array.isArray(event.egress)) {
				if (event.ingress.length !== event.egress.length) {
					console.error('Different traffic sample length', event);
					return;
				}
				// Check if there is a new maximum value, and reset the colors in case.
				let maxIngress = peer.maxIngress;
				event.ingress.forEach(({value}) => {
					if (value > maxIngress) {
						maxIngress = value;
					}
				});
				if (maxIngress > peer.maxIngress) {
					setMaxIngress(peer, maxIngress);
				}
				// Push the new values.
				peer.ingress.splice(peer.ingress.length, 0, ...event.ingress);
				const ingressDiff = peer.ingress.length - sampleLimit;
				if (ingressDiff > 0) {
					// Check if the maximum value is in the beginning.
					let i = 0;
					while (i < ingressDiff && peer.ingress[i].value < peer.maxIngress) {
						i++;
					}
					// Remove the old values from the beginning.
					peer.ingress.splice(0, ingressDiff);
					if (i < ingressDiff) {
						// Reset the colors if the maximum value leaves the chart.
						setIngressChartAttributes(peer);
					}
				}
				// Check if there is a new maximum value, and reset the colors in case.
				let maxEgress = peer.maxEgress;
				event.egress.forEach(({value}) => {
					if (value > maxEgress) {
						maxEgress = value;
					}
				});
				if (maxEgress > peer.maxEgress) {
					setMaxEgress(peer, maxEgress);
				}
				// Push the new values.
				peer.egress.splice(peer.egress.length, 0, ...event.egress);
				const egressDiff = peer.egress.length - sampleLimit;
				if (egressDiff > 0) {
					// Check if the maximum value is in the beginning.
					let i = 0;
					while (i < egressDiff && peer.egress[i].value < peer.maxEgress) {
						i++;
					}
					// Remove the old values from the beginning.
					peer.egress.splice(0, egressDiff);
					if (i < egressDiff) {
						// Reset the colors if the maximum value leaves the chart.
						setEgressChartAttributes(peer);
					}
				}
			}
		});
	}
	return prev;
};
|
||||
|
||||
// styles contains the constant styles of the component.
const styles = {
	// Compact table layout: the default Material-UI row/head heights are overridden.
	tableHead: {
		height: 'auto',
	},
	tableRow: {
		height: 'auto',
	},
	// Borderless cells with minimal horizontal padding.
	tableCell: {
		paddingTop: 0,
		paddingRight: 5,
		paddingBottom: 0,
		paddingLeft: 5,
		border: 'none',
	},
};
|
||||
export type Props = {
	container: Object, // Scrollable parent DOM element of the page.
	content: NetworkType, // Peer data to visualize.
	shouldUpdate: Object, // Keys of the content entries that changed.
};

// The component keeps no local state.
type State = {};
|
||||
// Network renders the network page.
class Network extends Component<Props, State> {
	// Scroll the parent container back to the top when the page is opened.
	componentDidMount() {
		const {container} = this.props;
		if (typeof container === 'undefined') {
			return;
		}
		container.scrollTop = 0;
	}

	// formatTime returns the given timestamp as MM/DD/HH:mm:ss, or the empty
	// string if it cannot be parsed as a date.
	formatTime = (t: string) => {
		const time = new Date(t);
		if (isNaN(time)) {
			return '';
		}
		// Zero-pad every component to two digits.
		const month = `0${time.getMonth() + 1}`.slice(-2);
		const date = `0${time.getDate()}`.slice(-2);
		const hours = `0${time.getHours()}`.slice(-2);
		const minutes = `0${time.getMinutes()}`.slice(-2);
		const seconds = `0${time.getSeconds()}`.slice(-2);
		return `${month}/${date}/${hours}:${minutes}:${seconds}`;
	};

	// copyToClipboard returns a click handler that copies the given node id to
	// the clipboard. NOTE(review): navigator.clipboard requires a secure
	// context in some browsers — failures are only logged.
	copyToClipboard = (id) => (event) => {
		event.preventDefault();
		navigator.clipboard.writeText(id).then(() => {}, () => {
			console.error("Failed to copy node id", id);
		});
	};

	// peerTableRow renders one row of the known-peer table: activity indicator,
	// shortened node id, location and the ingress/egress traffic charts.
	// Assumes peer.ingressGradient/egressGradient were initialized by the
	// inserter before rendering.
	peerTableRow = (ip, id, bundle, peer) => {
		// Substitute a small epsilon for zero samples so the sqrt-scaled
		// area charts still render a visible baseline; egress is negated
		// because its chart grows downwards.
		const ingressValues = peer.ingress.map(({value}) => ({ingress: value || 0.001}));
		const egressValues = peer.egress.map(({value}) => ({egress: -value || -0.001}));

		return (
			<TableRow key={`known_${ip}_${id}`} style={styles.tableRow}>
				<TableCell style={styles.tableCell}>
					{peer.active
						? <FontAwesomeIcon icon={fasCircle} color='green' />
						: <FontAwesomeIcon icon={farCircle} style={commonStyles.light} />
					}
				</TableCell>
				<TableCell style={{fontFamily: 'monospace', cursor: 'copy', ...styles.tableCell, ...commonStyles.light}} onClick={this.copyToClipboard(id)}>
					{id.substring(0, 10)}
				</TableCell>
				<TableCell style={styles.tableCell}>
					{bundle.location ? (() => {
						const l = bundle.location;
						return `${l.country ? l.country : ''}${l.city ? `/${l.city}` : ''}`;
					})() : ''}
				</TableCell>
				<TableCell style={styles.tableCell}>
					<AreaChart
						width={trafficChartWidth}
						height={trafficChartHeight}
						data={ingressValues}
						margin={{top: 5, right: 5, bottom: 0, left: 5}}
						syncId={`peerIngress_${ip}_${id}`}
					>
						<defs>
							<linearGradient id={`ingressGradient_${ip}_${id}`} x1='0' y1='1' x2='0' y2='0'>
								{peer.ingressGradient
								&& peer.ingressGradient.map(({offset, color}, i) => (
									<stop
										key={`ingressStop_${ip}_${id}_${i}`}
										offset={`${offset}%`}
										stopColor={color}
									/>
								))}
							</linearGradient>
						</defs>
						<Tooltip cursor={false} content={<CustomTooltip tooltip={bytePlotter('Download')} />} />
						<YAxis hide scale='sqrt' domain={[0.001, dataMax => Math.max(dataMax, 0)]} />
						<Area
							dataKey='ingress'
							isAnimationActive={false}
							type='monotone'
							fill={`url(#ingressGradient_${ip}_${id})`}
							stroke={peer.ingressGradient[peer.ingressGradient.length - 1].color}
							strokeWidth={chartStrokeWidth}
						/>
					</AreaChart>
					<AreaChart
						width={trafficChartWidth}
						height={trafficChartHeight}
						data={egressValues}
						margin={{top: 0, right: 5, bottom: 5, left: 5}}
						syncId={`peerIngress_${ip}_${id}`}
					>
						<defs>
							<linearGradient id={`egressGradient_${ip}_${id}`} x1='0' y1='1' x2='0' y2='0'>
								{peer.egressGradient
								&& peer.egressGradient.map(({offset, color}, i) => (
									<stop
										key={`egressStop_${ip}_${id}_${i}`}
										offset={`${offset}%`}
										stopColor={color}
									/>
								))}
							</linearGradient>
						</defs>
						<Tooltip cursor={false} content={<CustomTooltip tooltip={bytePlotter('Upload', multiplier(-1))} />} />
						<YAxis hide scale='sqrt' domain={[dataMin => Math.min(dataMin, 0), -0.001]} />
						<Area
							dataKey='egress'
							isAnimationActive={false}
							type='monotone'
							fill={`url(#egressGradient_${ip}_${id})`}
							stroke={peer.egressGradient[0].color}
							strokeWidth={chartStrokeWidth}
						/>
					</AreaChart>
				</TableCell>
			</TableRow>
		);
	};

	render() {
		return (
			<Grid container direction='row' justify='space-between'>
				<Grid item>
					<Table>
						<TableHead style={styles.tableHead}>
							<TableRow style={styles.tableRow}>
								<TableCell style={styles.tableCell} />
								<TableCell style={styles.tableCell}>Node ID</TableCell>
								<TableCell style={styles.tableCell}>Location</TableCell>
								<TableCell style={styles.tableCell}>Traffic</TableCell>
							</TableRow>
						</TableHead>
						{/* Active peers first... */}
						<TableBody>
							{Object.entries(this.props.content.peers.bundles).map(([ip, bundle]) => {
								if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
									return null;
								}
								return Object.entries(bundle.knownPeers).map(([id, peer]) => {
									if (peer.active === false) {
										return null;
									}
									return this.peerTableRow(ip, id, bundle, peer);
								});
							})}
						</TableBody>
						{/* ...then inactive peers. */}
						<TableBody>
							{Object.entries(this.props.content.peers.bundles).map(([ip, bundle]) => {
								if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
									return null;
								}
								return Object.entries(bundle.knownPeers).map(([id, peer]) => {
									if (peer.active === true) {
										return null;
									}
									return this.peerTableRow(ip, id, bundle, peer);
								});
							})}
						</TableBody>
					</Table>
				</Grid>
				<Grid item>
					<Typography variant='subtitle1' gutterBottom>
						Connection attempts
					</Typography>
					<Table>
						<TableHead style={styles.tableHead}>
							<TableRow style={styles.tableRow}>
								<TableCell style={styles.tableCell}>IP</TableCell>
								<TableCell style={styles.tableCell}>Location</TableCell>
								<TableCell style={styles.tableCell}>Nr</TableCell>
							</TableRow>
						</TableHead>
						<TableBody>
							{Object.entries(this.props.content.peers.bundles).map(([ip, bundle]) => {
								if (!bundle.attempts || bundle.attempts.length < 1) {
									return null;
								}
								return (
									<TableRow key={`attempt_${ip}`} style={styles.tableRow}>
										<TableCell style={styles.tableCell}>{ip}</TableCell>
										<TableCell style={styles.tableCell}>
											{bundle.location ? (() => {
												const l = bundle.location;
												return `${l.country ? l.country : ''}${l.city ? `/${l.city}` : ''}`;
											})() : ''}
										</TableCell>
										<TableCell style={styles.tableCell}>
											{Object.values(bundle.attempts).length}
										</TableCell>
									</TableRow>
								);
							})}
						</TableBody>
					</Table>
				</Grid>
			</Grid>
		);
	}
}
|
||||
|
||||
export default Network;
|
|
@ -18,11 +18,14 @@
|
|||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import List, {ListItem, ListItemIcon, ListItemText} from 'material-ui/List';
|
||||
import Icon from 'material-ui/Icon';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import List from '@material-ui/core/List';
|
||||
import ListItem from '@material-ui/core/ListItem';
|
||||
import ListItemIcon from '@material-ui/core/ListItemIcon';
|
||||
import ListItemText from '@material-ui/core/ListItemText';
|
||||
import Icon from '@material-ui/core/Icon';
|
||||
import Transition from 'react-transition-group/Transition';
|
||||
import {Icon as FontAwesome} from 'react-fa';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
|
||||
import {MENU, DURATION} from '../common';
|
||||
|
||||
|
@ -48,6 +51,7 @@ const themeStyles = theme => ({
|
|||
},
|
||||
icon: {
|
||||
fontSize: theme.spacing.unit * 3,
|
||||
overflow: 'unset',
|
||||
},
|
||||
});
|
||||
|
||||
|
@ -57,9 +61,11 @@ export type Props = {
|
|||
changeContent: string => void,
|
||||
};
|
||||
|
||||
type State = {}
|
||||
|
||||
// SideBar renders the sidebar of the dashboard.
|
||||
class SideBar extends Component<Props> {
|
||||
shouldComponentUpdate(nextProps) {
|
||||
class SideBar extends Component<Props, State> {
|
||||
shouldComponentUpdate(nextProps: Readonly<Props>, nextState: Readonly<State>, nextContext: any) {
|
||||
return nextProps.opened !== this.props.opened;
|
||||
}
|
||||
|
||||
|
@ -78,7 +84,7 @@ class SideBar extends Component<Props> {
|
|||
<ListItem button key={menu.id} onClick={this.clickOn(menu.id)} className={classes.listItem}>
|
||||
<ListItemIcon>
|
||||
<Icon className={classes.icon}>
|
||||
<FontAwesome name={menu.icon} />
|
||||
<FontAwesomeIcon icon={menu.icon} />
|
||||
</Icon>
|
||||
</ListItemIcon>
|
||||
<ListItemText
|
||||
|
|
|
@ -16,10 +16,8 @@
|
|||
|
||||
// fa-only-woff-loader removes the .eot, .ttf, .svg dependencies of the FontAwesome library,
|
||||
// because they produce unused extra blobs.
|
||||
module.exports = function(content) {
|
||||
return content
|
||||
.replace(/src.*url(?!.*url.*(\.eot)).*(\.eot)[^;]*;/,'')
|
||||
.replace(/url(?!.*url.*(\.eot)).*(\.eot)[^,]*,/,'')
|
||||
.replace(/url(?!.*url.*(\.ttf)).*(\.ttf)[^,]*,/,'')
|
||||
.replace(/,[^,]*url(?!.*url.*(\.svg)).*(\.svg)[^;]*;/,';');
|
||||
};
|
||||
module.exports = content => content
|
||||
.replace(/src.*url(?!.*url.*(\.eot)).*(\.eot)[^;]*;/, '')
|
||||
.replace(/url(?!.*url.*(\.eot)).*(\.eot)[^,]*,/, '')
|
||||
.replace(/url(?!.*url.*(\.ttf)).*(\.ttf)[^,]*,/, '')
|
||||
.replace(/,[^,]*url(?!.*url.*(\.svg)).*(\.svg)[^;]*;/, ';');
|
||||
|
|
|
@ -21,6 +21,6 @@
|
|||
</head>
|
||||
<body style="height: 100%; margin: 0">
|
||||
<div id="dashboard" style="height: 100%"></div>
|
||||
<script src="bundle.js"></script>
|
||||
<script type="text/javascript" src="bundle.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
|
|
@ -19,12 +19,15 @@
|
|||
import React from 'react';
|
||||
import {render} from 'react-dom';
|
||||
|
||||
import MuiThemeProvider from 'material-ui/styles/MuiThemeProvider';
|
||||
import createMuiTheme from 'material-ui/styles/createMuiTheme';
|
||||
import MuiThemeProvider from '@material-ui/core/styles/MuiThemeProvider';
|
||||
import createMuiTheme from '@material-ui/core/styles/createMuiTheme';
|
||||
|
||||
import Dashboard from './components/Dashboard';
|
||||
|
||||
const theme: Object = createMuiTheme({
|
||||
// typography: {
|
||||
// useNextVariants: true,
|
||||
// },
|
||||
palette: {
|
||||
type: 'dark',
|
||||
},
|
||||
|
|
|
@ -1,48 +1,65 @@
|
|||
{
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"babel-core": "^6.26.0",
|
||||
"babel-eslint": "^8.2.1",
|
||||
"babel-loader": "^7.1.2",
|
||||
"babel-plugin-transform-class-properties": "^6.24.1",
|
||||
"babel-plugin-transform-decorators-legacy": "^1.3.4",
|
||||
"babel-plugin-transform-flow-strip-types": "^6.22.0",
|
||||
"babel-plugin-transform-runtime": "^6.23.0",
|
||||
"babel-preset-env": "^1.6.1",
|
||||
"babel-preset-react": "^6.24.1",
|
||||
"babel-preset-stage-0": "^6.24.1",
|
||||
"babel-runtime": "^6.26.0",
|
||||
"classnames": "^2.2.5",
|
||||
"css-loader": "^0.28.9",
|
||||
"@babel/core": "7.3.4",
|
||||
"@babel/plugin-proposal-class-properties": "7.3.4",
|
||||
"@babel/plugin-proposal-function-bind": "7.2.0",
|
||||
"@babel/plugin-transform-flow-strip-types": "7.3.4",
|
||||
"@babel/preset-env": "7.3.4",
|
||||
"@babel/preset-react": "^7.0.0",
|
||||
"@babel/preset-stage-0": "^7.0.0",
|
||||
"@fortawesome/fontawesome-free-regular": "^5.0.13",
|
||||
"@fortawesome/fontawesome-svg-core": "^1.2.15",
|
||||
"@fortawesome/free-regular-svg-icons": "^5.7.2",
|
||||
"@fortawesome/free-solid-svg-icons": "^5.7.2",
|
||||
"@fortawesome/react-fontawesome": "^0.1.4",
|
||||
"@material-ui/core": "3.9.2",
|
||||
"@material-ui/icons": "3.0.2",
|
||||
"babel-eslint": "10.0.1",
|
||||
"babel-loader": "8.0.5",
|
||||
"classnames": "^2.2.6",
|
||||
"color-convert": "^2.0.0",
|
||||
"css-loader": "2.1.1",
|
||||
"escape-html": "^1.0.3",
|
||||
"eslint": "^4.16.0",
|
||||
"eslint-config-airbnb": "^16.1.0",
|
||||
"eslint-loader": "^2.0.0",
|
||||
"eslint-plugin-flowtype": "^2.41.0",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-jsx-a11y": "^6.0.3",
|
||||
"eslint-plugin-react": "^7.5.1",
|
||||
"file-loader": "^1.1.6",
|
||||
"flow-bin": "^0.63.1",
|
||||
"flow-bin-loader": "^1.0.2",
|
||||
"flow-typed": "^2.2.3",
|
||||
"material-ui": "^1.0.0-beta.30",
|
||||
"material-ui-icons": "^1.0.0-beta.17",
|
||||
"eslint": "5.15.1",
|
||||
"eslint-config-airbnb": "^17.0.0",
|
||||
"eslint-loader": "2.1.2",
|
||||
"eslint-plugin-flowtype": "3.4.2",
|
||||
"eslint-plugin-import": "2.16.0",
|
||||
"eslint-plugin-jsx-a11y": "6.2.1",
|
||||
"eslint-plugin-node": "8.0.1",
|
||||
"eslint-plugin-promise": "4.0.1",
|
||||
"eslint-plugin-react": "7.12.4",
|
||||
"file-loader": "3.0.1",
|
||||
"flow-bin": "0.94.0",
|
||||
"flow-bin-loader": "^1.0.3",
|
||||
"flow-typed": "^2.5.1",
|
||||
"js-beautify": "1.9.0",
|
||||
"path": "^0.12.7",
|
||||
"react": "^16.2.0",
|
||||
"react-dom": "^16.2.0",
|
||||
"react-fa": "^5.0.0",
|
||||
"react-transition-group": "^2.2.1",
|
||||
"recharts": "^1.0.0-beta.9",
|
||||
"style-loader": "^0.19.1",
|
||||
"react": "16.8.4",
|
||||
"react-dom": "16.8.4",
|
||||
"react-hot-loader": "4.8.0",
|
||||
"react-transition-group": "2.6.0",
|
||||
"recharts": "1.5.0",
|
||||
"style-loader": "0.23.1",
|
||||
"terser-webpack-plugin": "^1.2.3",
|
||||
"url": "^0.11.0",
|
||||
"url-loader": "^0.6.2",
|
||||
"webpack": "^3.10.0",
|
||||
"webpack-dev-server": "^2.11.1"
|
||||
"url-loader": "1.1.2",
|
||||
"webpack": "4.29.6",
|
||||
"webpack-cli": "3.2.3",
|
||||
"webpack-dashboard": "3.0.0",
|
||||
"webpack-dev-server": "3.2.1",
|
||||
"webpack-merge": "4.2.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "NODE_ENV=production webpack",
|
||||
"stats": "webpack --profile --json > stats.json",
|
||||
"dev": "webpack-dev-server --port 8081",
|
||||
"flow": "flow-typed install"
|
||||
}
|
||||
"build": "webpack --config webpack.config.prod.js",
|
||||
"stats": "webpack --config webpack.config.prod.js --profile --json > stats.json",
|
||||
"dev": "webpack-dev-server --open --config webpack.config.dev.js",
|
||||
"dash": "webpack-dashboard -- yarn dev",
|
||||
"install-flow": "flow-typed install",
|
||||
"flow": "flow status --show-all-errors",
|
||||
"eslint": "eslint **/*"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"license": "LGPL-3.0-or-later"
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@ export type Content = {
|
|||
export type ChartEntries = Array<ChartEntry>;
|
||||
|
||||
export type ChartEntry = {
|
||||
time: Date,
|
||||
value: number,
|
||||
};
|
||||
|
||||
|
@ -51,7 +50,50 @@ export type TxPool = {
|
|||
};
|
||||
|
||||
export type Network = {
|
||||
/* TODO (kurkomisi) */
|
||||
peers: Peers,
|
||||
diff: Array<PeerEvent>
|
||||
};
|
||||
|
||||
export type PeerEvent = {
|
||||
ip: string,
|
||||
id: string,
|
||||
remove: string,
|
||||
location: GeoLocation,
|
||||
connected: Date,
|
||||
disconnected: Date,
|
||||
ingress: ChartEntries,
|
||||
egress: ChartEntries,
|
||||
activity: string,
|
||||
};
|
||||
|
||||
export type Peers = {
|
||||
bundles: {[string]: PeerBundle},
|
||||
};
|
||||
|
||||
export type PeerBundle = {
|
||||
location: GeoLocation,
|
||||
knownPeers: {[string]: KnownPeer},
|
||||
attempts: Array<UnknownPeer>,
|
||||
};
|
||||
|
||||
export type KnownPeer = {
|
||||
connected: Array<Date>,
|
||||
disconnected: Array<Date>,
|
||||
ingress: Array<ChartEntries>,
|
||||
egress: Array<ChartEntries>,
|
||||
active: boolean,
|
||||
};
|
||||
|
||||
export type UnknownPeer = {
|
||||
connected: Date,
|
||||
disconnected: Date,
|
||||
};
|
||||
|
||||
export type GeoLocation = {
|
||||
country: string,
|
||||
city: string,
|
||||
latitude: number,
|
||||
longitude: number,
|
||||
};
|
||||
|
||||
export type System = {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -14,28 +14,25 @@
|
|||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const webpack = require('webpack');
|
||||
const path = require('path');
|
||||
|
||||
module.exports = {
|
||||
target: 'web',
|
||||
entry: {
|
||||
bundle: './index',
|
||||
},
|
||||
output: {
|
||||
filename: '[name].js',
|
||||
path: path.resolve(__dirname, ''),
|
||||
sourceMapFilename: '[file].map',
|
||||
},
|
||||
resolve: {
|
||||
modules: [
|
||||
'node_modules',
|
||||
path.resolve(__dirname, 'components'), // import './components/Component' -> import 'Component'
|
||||
],
|
||||
extensions: ['.js', '.jsx'],
|
||||
},
|
||||
entry: './index',
|
||||
output: {
|
||||
path: path.resolve(__dirname, ''),
|
||||
filename: 'bundle.js',
|
||||
},
|
||||
plugins: [
|
||||
new webpack.optimize.UglifyJsPlugin({
|
||||
comments: false,
|
||||
mangle: false,
|
||||
beautify: true,
|
||||
}),
|
||||
new webpack.DefinePlugin({
|
||||
PROD: process.env.NODE_ENV === 'production',
|
||||
}),
|
||||
],
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
|
@ -45,27 +42,38 @@ module.exports = {
|
|||
{
|
||||
loader: 'babel-loader',
|
||||
options: {
|
||||
plugins: [ // order: from top to bottom
|
||||
// 'transform-decorators-legacy', // @withStyles, @withTheme
|
||||
'transform-class-properties', // static defaultProps
|
||||
'transform-flow-strip-types',
|
||||
],
|
||||
presets: [ // order: from bottom to top
|
||||
'env',
|
||||
'react',
|
||||
'stage-0',
|
||||
'@babel/env',
|
||||
'@babel/react',
|
||||
],
|
||||
plugins: [ // order: from top to bottom
|
||||
'@babel/proposal-function-bind', // instead of stage 0
|
||||
'@babel/proposal-class-properties', // static defaultProps
|
||||
'@babel/transform-flow-strip-types',
|
||||
'react-hot-loader/babel',
|
||||
],
|
||||
},
|
||||
},
|
||||
// 'eslint-loader', // show errors not only in the editor, but also in the console
|
||||
// 'eslint-loader', // show errors in the console
|
||||
],
|
||||
},
|
||||
{
|
||||
test: /font-awesome\.css$/,
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
path.resolve(__dirname, './fa-only-woff-loader.js'),
|
||||
test: /\.css$/,
|
||||
oneOf: [
|
||||
{
|
||||
test: /font-awesome/,
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
path.resolve(__dirname, './fa-only-woff-loader.js'),
|
||||
],
|
||||
},
|
||||
{
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
|
@ -0,0 +1,35 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const webpack = require('webpack');
|
||||
const merge = require('webpack-merge');
|
||||
const WebpackDashboard = require('webpack-dashboard/plugin');
|
||||
const common = require('./webpack.config.common.js');
|
||||
|
||||
module.exports = merge(common, {
|
||||
mode: 'development',
|
||||
plugins: [
|
||||
new WebpackDashboard(),
|
||||
new webpack.HotModuleReplacementPlugin(),
|
||||
],
|
||||
// devtool: 'eval',
|
||||
devtool: 'source-map',
|
||||
devServer: {
|
||||
port: 8081,
|
||||
hot: true,
|
||||
compress: true,
|
||||
},
|
||||
});
|
|
@ -0,0 +1,41 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const TerserPlugin = require('terser-webpack-plugin');
|
||||
const merge = require('webpack-merge');
|
||||
const common = require('./webpack.config.common.js');
|
||||
|
||||
module.exports = merge(common, {
|
||||
mode: 'production',
|
||||
devtool: 'source-map',
|
||||
optimization: {
|
||||
minimize: true,
|
||||
namedModules: true, // Module names instead of numbers - resolves the large diff problem.
|
||||
minimizer: [
|
||||
new TerserPlugin({
|
||||
cache: true,
|
||||
parallel: true,
|
||||
sourceMap: true,
|
||||
terserOptions: {
|
||||
output: {
|
||||
comments: false,
|
||||
beautify: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
],
|
||||
},
|
||||
});
|
File diff suppressed because it is too large
Load Diff
|
@ -18,8 +18,10 @@ package dashboard
|
|||
|
||||
//go:generate yarn --cwd ./assets install
|
||||
//go:generate yarn --cwd ./assets build
|
||||
//go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/index.html assets/bundle.js
|
||||
//go:generate yarn --cwd ./assets js-beautify -f bundle.js.map -r -w 1
|
||||
//go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/index.html assets/bundle.js assets/bundle.js.map
|
||||
//go:generate sh -c "sed 's#var _bundleJs#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate sh -c "sed 's#var _bundleJsMap#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate sh -c "sed 's#var _indexHtml#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate gofmt -w -s assets.go
|
||||
|
||||
|
@ -27,16 +29,13 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"io"
|
||||
|
||||
"github.com/elastic/gosigar"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
@ -45,31 +44,29 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
activeMemorySampleLimit = 200 // Maximum number of active memory data samples
|
||||
virtualMemorySampleLimit = 200 // Maximum number of virtual memory data samples
|
||||
networkIngressSampleLimit = 200 // Maximum number of network ingress data samples
|
||||
networkEgressSampleLimit = 200 // Maximum number of network egress data samples
|
||||
processCPUSampleLimit = 200 // Maximum number of process cpu data samples
|
||||
systemCPUSampleLimit = 200 // Maximum number of system cpu data samples
|
||||
diskReadSampleLimit = 200 // Maximum number of disk read data samples
|
||||
diskWriteSampleLimit = 200 // Maximum number of disk write data samples
|
||||
sampleLimit = 200 // Maximum number of data samples
|
||||
)
|
||||
|
||||
var nextID uint32 // Next connection id
|
||||
|
||||
// Dashboard contains the dashboard internals.
|
||||
type Dashboard struct {
|
||||
config *Config
|
||||
config *Config // Configuration values for the dashboard
|
||||
|
||||
listener net.Listener
|
||||
conns map[uint32]*client // Currently live websocket connections
|
||||
history *Message
|
||||
lock sync.RWMutex // Lock protecting the dashboard's internals
|
||||
listener net.Listener // Network listener listening for dashboard clients
|
||||
conns map[uint32]*client // Currently live websocket connections
|
||||
nextConnID uint32 // Next connection id
|
||||
|
||||
logdir string
|
||||
history *Message // Stored historical data
|
||||
|
||||
lock sync.Mutex // Lock protecting the dashboard's internals
|
||||
sysLock sync.RWMutex // Lock protecting the stored system data
|
||||
peerLock sync.RWMutex // Lock protecting the stored peer data
|
||||
logLock sync.RWMutex // Lock protecting the stored log data
|
||||
|
||||
geodb *geoDB // geoip database instance for IP to geographical information conversions
|
||||
logdir string // Directory containing the log files
|
||||
|
||||
quit chan chan error // Channel used for graceful exit
|
||||
wg sync.WaitGroup
|
||||
wg sync.WaitGroup // Wait group used to close the data collector threads
|
||||
}
|
||||
|
||||
// client represents active websocket connection with a remote browser.
|
||||
|
@ -96,14 +93,14 @@ func New(config *Config, commit string, logdir string) *Dashboard {
|
|||
Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
|
||||
},
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: emptyChartEntries(now, activeMemorySampleLimit, config.Refresh),
|
||||
VirtualMemory: emptyChartEntries(now, virtualMemorySampleLimit, config.Refresh),
|
||||
NetworkIngress: emptyChartEntries(now, networkIngressSampleLimit, config.Refresh),
|
||||
NetworkEgress: emptyChartEntries(now, networkEgressSampleLimit, config.Refresh),
|
||||
ProcessCPU: emptyChartEntries(now, processCPUSampleLimit, config.Refresh),
|
||||
SystemCPU: emptyChartEntries(now, systemCPUSampleLimit, config.Refresh),
|
||||
DiskRead: emptyChartEntries(now, diskReadSampleLimit, config.Refresh),
|
||||
DiskWrite: emptyChartEntries(now, diskWriteSampleLimit, config.Refresh),
|
||||
ActiveMemory: emptyChartEntries(now, sampleLimit),
|
||||
VirtualMemory: emptyChartEntries(now, sampleLimit),
|
||||
NetworkIngress: emptyChartEntries(now, sampleLimit),
|
||||
NetworkEgress: emptyChartEntries(now, sampleLimit),
|
||||
ProcessCPU: emptyChartEntries(now, sampleLimit),
|
||||
SystemCPU: emptyChartEntries(now, sampleLimit),
|
||||
DiskRead: emptyChartEntries(now, sampleLimit),
|
||||
DiskWrite: emptyChartEntries(now, sampleLimit),
|
||||
},
|
||||
},
|
||||
logdir: logdir,
|
||||
|
@ -111,12 +108,10 @@ func New(config *Config, commit string, logdir string) *Dashboard {
|
|||
}
|
||||
|
||||
// emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
|
||||
func emptyChartEntries(t time.Time, limit int, refresh time.Duration) ChartEntries {
|
||||
func emptyChartEntries(t time.Time, limit int) ChartEntries {
|
||||
ce := make(ChartEntries, limit)
|
||||
for i := 0; i < limit; i++ {
|
||||
ce[i] = &ChartEntry{
|
||||
Time: t.Add(-time.Duration(i) * refresh),
|
||||
}
|
||||
ce[i] = new(ChartEntry)
|
||||
}
|
||||
return ce
|
||||
}
|
||||
|
@ -132,9 +127,10 @@ func (db *Dashboard) APIs() []rpc.API { return nil }
|
|||
func (db *Dashboard) Start(server *p2p.Server) error {
|
||||
log.Info("Starting dashboard")
|
||||
|
||||
db.wg.Add(2)
|
||||
go db.collectData()
|
||||
db.wg.Add(3)
|
||||
go db.collectSystemData()
|
||||
go db.streamLogs()
|
||||
go db.collectPeerData()
|
||||
|
||||
http.HandleFunc("/", db.webHandler)
|
||||
http.Handle("/api", websocket.Handler(db.apiHandler))
|
||||
|
@ -160,7 +156,7 @@ func (db *Dashboard) Stop() error {
|
|||
}
|
||||
// Close the collectors.
|
||||
errc := make(chan error, 1)
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := 0; i < 3; i++ {
|
||||
db.quit <- errc
|
||||
if err := <-errc; err != nil {
|
||||
errs = append(errs, err)
|
||||
|
@ -206,7 +202,7 @@ func (db *Dashboard) webHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
// apiHandler handles requests for the dashboard.
|
||||
func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
||||
id := atomic.AddUint32(&nextID, 1)
|
||||
id := atomic.AddUint32(&db.nextConnID, 1)
|
||||
client := &client{
|
||||
conn: conn,
|
||||
msg: make(chan *Message, 128),
|
||||
|
@ -233,10 +229,21 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
|||
}
|
||||
}()
|
||||
|
||||
db.lock.Lock()
|
||||
// Send the past data.
|
||||
client.msg <- deepcopy.Copy(db.history).(*Message)
|
||||
db.sysLock.RLock()
|
||||
db.peerLock.RLock()
|
||||
db.logLock.RLock()
|
||||
|
||||
h := deepcopy.Copy(db.history).(*Message)
|
||||
|
||||
db.sysLock.RUnlock()
|
||||
db.peerLock.RUnlock()
|
||||
db.logLock.RUnlock()
|
||||
|
||||
client.msg <- h
|
||||
|
||||
// Start tracking the connection and drop at connection loss.
|
||||
db.lock.Lock()
|
||||
db.conns[id] = client
|
||||
db.lock.Unlock()
|
||||
defer func() {
|
||||
|
@ -259,136 +266,6 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
|||
}
|
||||
}
|
||||
|
||||
// meterCollector returns a function, which retrieves a specific meter.
|
||||
func meterCollector(name string) func() int64 {
|
||||
if metric := metrics.DefaultRegistry.Get(name); metric != nil {
|
||||
m := metric.(metrics.Meter)
|
||||
return func() int64 {
|
||||
return m.Count()
|
||||
}
|
||||
}
|
||||
return func() int64 {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// collectData collects the required data to plot on the dashboard.
|
||||
func (db *Dashboard) collectData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
systemCPUUsage := gosigar.Cpu{}
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
mem runtime.MemStats
|
||||
|
||||
collectNetworkIngress = meterCollector("p2p/InboundTraffic")
|
||||
collectNetworkEgress = meterCollector("p2p/OutboundTraffic")
|
||||
collectDiskRead = meterCollector("eth/db/chaindata/disk/read")
|
||||
collectDiskWrite = meterCollector("eth/db/chaindata/disk/write")
|
||||
|
||||
prevNetworkIngress = collectNetworkIngress()
|
||||
prevNetworkEgress = collectNetworkEgress()
|
||||
prevProcessCPUTime = getProcessCPUTime()
|
||||
prevSystemCPUUsage = systemCPUUsage
|
||||
prevDiskRead = collectDiskRead()
|
||||
prevDiskWrite = collectDiskWrite()
|
||||
|
||||
frequency = float64(db.config.Refresh / time.Second)
|
||||
numCPU = float64(runtime.NumCPU())
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
case <-time.After(db.config.Refresh):
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
curNetworkIngress = collectNetworkIngress()
|
||||
curNetworkEgress = collectNetworkEgress()
|
||||
curProcessCPUTime = getProcessCPUTime()
|
||||
curSystemCPUUsage = systemCPUUsage
|
||||
curDiskRead = collectDiskRead()
|
||||
curDiskWrite = collectDiskWrite()
|
||||
|
||||
deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
|
||||
deltaNetworkEgress = float64(curNetworkEgress - prevNetworkEgress)
|
||||
deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
|
||||
deltaSystemCPUUsage = curSystemCPUUsage.Delta(prevSystemCPUUsage)
|
||||
deltaDiskRead = curDiskRead - prevDiskRead
|
||||
deltaDiskWrite = curDiskWrite - prevDiskWrite
|
||||
)
|
||||
prevNetworkIngress = curNetworkIngress
|
||||
prevNetworkEgress = curNetworkEgress
|
||||
prevProcessCPUTime = curProcessCPUTime
|
||||
prevSystemCPUUsage = curSystemCPUUsage
|
||||
prevDiskRead = curDiskRead
|
||||
prevDiskWrite = curDiskWrite
|
||||
|
||||
now := time.Now()
|
||||
|
||||
runtime.ReadMemStats(&mem)
|
||||
activeMemory := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(mem.Alloc) / frequency,
|
||||
}
|
||||
virtualMemory := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(mem.Sys) / frequency,
|
||||
}
|
||||
networkIngress := &ChartEntry{
|
||||
Time: now,
|
||||
Value: deltaNetworkIngress / frequency,
|
||||
}
|
||||
networkEgress := &ChartEntry{
|
||||
Time: now,
|
||||
Value: deltaNetworkEgress / frequency,
|
||||
}
|
||||
processCPU := &ChartEntry{
|
||||
Time: now,
|
||||
Value: deltaProcessCPUTime / frequency / numCPU * 100,
|
||||
}
|
||||
systemCPU := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
|
||||
}
|
||||
diskRead := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(deltaDiskRead) / frequency,
|
||||
}
|
||||
diskWrite := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(deltaDiskWrite) / frequency,
|
||||
}
|
||||
sys := db.history.System
|
||||
db.lock.Lock()
|
||||
sys.ActiveMemory = append(sys.ActiveMemory[1:], activeMemory)
|
||||
sys.VirtualMemory = append(sys.VirtualMemory[1:], virtualMemory)
|
||||
sys.NetworkIngress = append(sys.NetworkIngress[1:], networkIngress)
|
||||
sys.NetworkEgress = append(sys.NetworkEgress[1:], networkEgress)
|
||||
sys.ProcessCPU = append(sys.ProcessCPU[1:], processCPU)
|
||||
sys.SystemCPU = append(sys.SystemCPU[1:], systemCPU)
|
||||
sys.DiskRead = append(sys.DiskRead[1:], diskRead)
|
||||
sys.DiskWrite = append(sys.DiskWrite[1:], diskWrite)
|
||||
db.lock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: ChartEntries{activeMemory},
|
||||
VirtualMemory: ChartEntries{virtualMemory},
|
||||
NetworkIngress: ChartEntries{networkIngress},
|
||||
NetworkEgress: ChartEntries{networkEgress},
|
||||
ProcessCPU: ChartEntries{processCPU},
|
||||
SystemCPU: ChartEntries{systemCPU},
|
||||
DiskRead: ChartEntries{diskRead},
|
||||
DiskWrite: ChartEntries{diskWrite},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendToAll sends the given message to the active dashboards.
|
||||
func (db *Dashboard) sendToAll(msg *Message) {
|
||||
db.lock.Lock()
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/apilayer/freegeoip"
|
||||
)
|
||||
|
||||
// geoDBInfo contains all the geographical information we could extract based on an IP
|
||||
// address.
|
||||
type geoDBInfo struct {
|
||||
Country struct {
|
||||
Names struct {
|
||||
English string `maxminddb:"en" json:"en,omitempty"`
|
||||
} `maxminddb:"names" json:"names,omitempty"`
|
||||
} `maxminddb:"country" json:"country,omitempty"`
|
||||
City struct {
|
||||
Names struct {
|
||||
English string `maxminddb:"en" json:"en,omitempty"`
|
||||
} `maxminddb:"names" json:"names,omitempty"`
|
||||
} `maxminddb:"city" json:"city,omitempty"`
|
||||
Location struct {
|
||||
Latitude float64 `maxminddb:"latitude" json:"latitude,omitempty"`
|
||||
Longitude float64 `maxminddb:"longitude" json:"longitude,omitempty"`
|
||||
} `maxminddb:"location" json:"location,omitempty"`
|
||||
}
|
||||
|
||||
// geoLocation contains geographical information.
|
||||
type geoLocation struct {
|
||||
Country string `json:"country,omitempty"`
|
||||
City string `json:"city,omitempty"`
|
||||
Latitude float64 `json:"latitude,omitempty"`
|
||||
Longitude float64 `json:"longitude,omitempty"`
|
||||
}
|
||||
|
||||
// geoDB represents a geoip database that can be queried for IP to geographical
|
||||
// information conversions.
|
||||
type geoDB struct {
|
||||
geodb *freegeoip.DB
|
||||
}
|
||||
|
||||
// Open creates a new geoip database with an up-to-date database from the internet.
|
||||
func openGeoDB() (*geoDB, error) {
|
||||
// Initiate a geoip database to cross reference locations
|
||||
db, err := freegeoip.OpenURL(freegeoip.MaxMindDB, 24*time.Hour, time.Hour)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Wait until the database is updated to the latest data
|
||||
select {
|
||||
case <-db.NotifyOpen():
|
||||
case err := <-db.NotifyError():
|
||||
return nil, err
|
||||
}
|
||||
// Assemble and return our custom wrapper
|
||||
return &geoDB{geodb: db}, nil
|
||||
}
|
||||
|
||||
// Close terminates the database background updater.
|
||||
func (db *geoDB) close() error {
|
||||
db.geodb.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lookup converts an IP address to a geographical location.
|
||||
func (db *geoDB) lookup(ip net.IP) *geoDBInfo {
|
||||
result := new(geoDBInfo)
|
||||
db.geodb.Lookup(ip, result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Location retrieves the geographical location of the given IP address.
|
||||
func (db *geoDB) location(ip string) *geoLocation {
|
||||
location := db.lookup(net.ParseIP(ip))
|
||||
return &geoLocation{
|
||||
Country: location.Country.Names.English,
|
||||
City: location.City.Names.English,
|
||||
Latitude: location.Location.Latitude,
|
||||
Longitude: location.Location.Longitude,
|
||||
}
|
||||
}
|
|
@ -94,13 +94,13 @@ func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
|
|||
// The last file is continuously updated, and its chunks are streamed,
|
||||
// so in order to avoid log record duplication on the client side, it is
|
||||
// handled differently. Its actual content is always saved in the history.
|
||||
db.lock.Lock()
|
||||
db.logLock.RLock()
|
||||
if db.history.Logs != nil {
|
||||
c.msg <- &Message{
|
||||
Logs: db.history.Logs,
|
||||
Logs: deepcopy.Copy(db.history.Logs).(*LogsMessage),
|
||||
}
|
||||
}
|
||||
db.lock.Unlock()
|
||||
db.logLock.RUnlock()
|
||||
return
|
||||
case fileNames[idx] == r.Name:
|
||||
idx++
|
||||
|
@ -174,7 +174,7 @@ func (db *Dashboard) streamLogs() {
|
|||
log.Warn("Problem with file", "name", opened.Name(), "err", err)
|
||||
return
|
||||
}
|
||||
db.lock.Lock()
|
||||
db.logLock.Lock()
|
||||
db.history.Logs = &LogsMessage{
|
||||
Source: &LogFile{
|
||||
Name: fi.Name(),
|
||||
|
@ -182,7 +182,7 @@ func (db *Dashboard) streamLogs() {
|
|||
},
|
||||
Chunk: emptyChunk,
|
||||
}
|
||||
db.lock.Unlock()
|
||||
db.logLock.Unlock()
|
||||
|
||||
watcher := make(chan notify.EventInfo, 10)
|
||||
if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
|
||||
|
@ -240,10 +240,10 @@ loop:
|
|||
log.Warn("Problem with file", "name", opened.Name(), "err", err)
|
||||
break loop
|
||||
}
|
||||
db.lock.Lock()
|
||||
db.logLock.Lock()
|
||||
db.history.Logs.Source.Name = fi.Name()
|
||||
db.history.Logs.Chunk = emptyChunk
|
||||
db.lock.Unlock()
|
||||
db.logLock.Unlock()
|
||||
case <-ticker.C: // Send log updates to the client.
|
||||
if opened == nil {
|
||||
log.Warn("The last log file is not opened")
|
||||
|
@ -266,7 +266,7 @@ loop:
|
|||
|
||||
var l *LogsMessage
|
||||
// Update the history.
|
||||
db.lock.Lock()
|
||||
db.logLock.Lock()
|
||||
if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
|
||||
db.history.Logs.Chunk = chunk
|
||||
l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
|
||||
|
@ -278,7 +278,7 @@ loop:
|
|||
db.history.Logs.Chunk = b
|
||||
l = &LogsMessage{Chunk: chunk}
|
||||
}
|
||||
db.lock.Unlock()
|
||||
db.logLock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{Logs: l})
|
||||
case errc = <-db.quit:
|
||||
|
|
|
@ -18,7 +18,6 @@ package dashboard
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
|
@ -34,8 +33,7 @@ type Message struct {
|
|||
type ChartEntries []*ChartEntry
|
||||
|
||||
type ChartEntry struct {
|
||||
Time time.Time `json:"time,omitempty"`
|
||||
Value float64 `json:"value,omitempty"`
|
||||
Value float64 `json:"value"`
|
||||
}
|
||||
|
||||
type GeneralMessage struct {
|
||||
|
@ -55,10 +53,14 @@ type TxPoolMessage struct {
|
|||
/* TODO (kurkomisi) */
|
||||
}
|
||||
|
||||
// NetworkMessage contains information about the peers
|
||||
// organized based on their IP address and node ID.
|
||||
type NetworkMessage struct {
|
||||
/* TODO (kurkomisi) */
|
||||
Peers *peerContainer `json:"peers,omitempty"` // Peer tree.
|
||||
Diff []*peerEvent `json:"diff,omitempty"` // Events that change the peer tree.
|
||||
}
|
||||
|
||||
// SystemMessage contains the metered system data samples.
|
||||
type SystemMessage struct {
|
||||
ActiveMemory ChartEntries `json:"activeMemory,omitempty"`
|
||||
VirtualMemory ChartEntries `json:"virtualMemory,omitempty"`
|
||||
|
@ -70,7 +72,7 @@ type SystemMessage struct {
|
|||
DiskWrite ChartEntries `json:"diskWrite,omitempty"`
|
||||
}
|
||||
|
||||
// LogsMessage wraps up a log chunk. If Source isn't present, the chunk is a stream chunk.
|
||||
// LogsMessage wraps up a log chunk. If 'Source' isn't present, the chunk is a stream chunk.
|
||||
type LogsMessage struct {
|
||||
Source *LogFile `json:"source,omitempty"` // Attributes of the log file.
|
||||
Chunk json.RawMessage `json:"chunk"` // Contains log records.
|
||||
|
@ -87,6 +89,7 @@ type Request struct {
|
|||
Logs *LogsRequest `json:"logs,omitempty"`
|
||||
}
|
||||
|
||||
// LogsRequest contains the attributes of the log file the client wants to receive.
|
||||
type LogsRequest struct {
|
||||
Name string `json:"name"` // The request handler searches for log file based on this file name.
|
||||
Past bool `json:"past"` // Denotes whether the client wants the previous or the next file.
|
||||
|
|
|
@ -0,0 +1,552 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
const (
|
||||
eventBufferLimit = 128 // Maximum number of buffered peer events.
|
||||
knownPeerLimit = 100 // Maximum number of stored peers, which successfully made the handshake.
|
||||
attemptLimit = 200 // Maximum number of stored peers, which failed to make the handshake.
|
||||
|
||||
// eventLimit is the maximum number of the dashboard's custom peer events,
|
||||
// that are collected between two metering period and sent to the clients
|
||||
// as one message.
|
||||
// TODO (kurkomisi): Limit the number of events.
|
||||
eventLimit = knownPeerLimit << 2
|
||||
)
|
||||
|
||||
// peerContainer contains information about the node's peers. This data structure
|
||||
// maintains the metered peer data based on the different behaviours of the peers.
|
||||
//
|
||||
// Every peer has an IP address, and the peers that manage to make the handshake
|
||||
// (known peers) have node IDs too. There can appear more peers with the same IP,
|
||||
// therefore the peer container data structure is a tree consisting of a map of
|
||||
// maps, where the first key groups the peers by IP, while the second one groups
|
||||
// them by the node ID. The known peers can be active if their connection is still
|
||||
// open, or inactive otherwise. The peers failing before the handshake (unknown
|
||||
// peers) only have IP addresses, so their connection attempts are stored as part
|
||||
// of the value of the outer map.
|
||||
//
|
||||
// Another criteria is to limit the number of metered peers so that
|
||||
// they don't fill the memory. The selection order is based on the
|
||||
// peers activity: the peers that are inactive for the longest time
|
||||
// are thrown first. For the selection a fifo list is used which is
|
||||
// linked to the bottom of the peer tree in a way that every activity
|
||||
// of the peer pushes the peer to the end of the list, so the inactive
|
||||
// ones come to the front. When a peer has some activity, it is removed
|
||||
// from and reinserted into the list. When the length of the list reaches
|
||||
// the limit, the first element is removed from the list, as well as from
|
||||
// the tree.
|
||||
//
|
||||
// The active peers have priority over the inactive ones, therefore
|
||||
// they have their own list. The separation makes it sure that the
|
||||
// inactive peers are always removed before the active ones.
|
||||
//
|
||||
// The peers that don't manage to make handshake are not inserted into the list,
|
||||
// only their connection attempts are appended to the array belonging to their IP.
|
||||
// In order to keep the fifo principle, a super array contains the order of the
|
||||
// attempts, and when the overall count reaches the limit, the earliest attempt is
|
||||
// removed from the beginning of its array.
|
||||
//
|
||||
// This data structure makes it possible to marshal the peer
|
||||
// history simply by passing it to the JSON marshaler.
|
||||
type peerContainer struct {
|
||||
// Bundles is the outer map using the peer's IP address as key.
|
||||
Bundles map[string]*peerBundle `json:"bundles,omitempty"`
|
||||
|
||||
activeCount int // Number of the still connected peers
|
||||
|
||||
// inactivePeers contains the peers with closed connection in chronological order.
|
||||
inactivePeers *list.List
|
||||
|
||||
// attemptOrder is the super array containing the IP addresses, from which
|
||||
// the peers attempted to connect then failed before/during the handshake.
|
||||
// Its values are appended in chronological order, which means that the
|
||||
// oldest attempt is at the beginning of the array. When the first element
|
||||
// is removed, the first element of the related bundle's attempt array is
|
||||
// removed too, ensuring that always the latest attempts are stored.
|
||||
attemptOrder []string
|
||||
|
||||
// geodb is the geoip database used to retrieve the peers' geographical location.
|
||||
geodb *geoDB
|
||||
}
|
||||
|
||||
// newPeerContainer returns a new instance of the peer container.
|
||||
func newPeerContainer(geodb *geoDB) *peerContainer {
|
||||
return &peerContainer{
|
||||
Bundles: make(map[string]*peerBundle),
|
||||
inactivePeers: list.New(),
|
||||
attemptOrder: make([]string, 0, attemptLimit),
|
||||
geodb: geodb,
|
||||
}
|
||||
}
|
||||
|
||||
// bundle inserts a new peer bundle into the map, if the peer belonging
|
||||
// to the given IP wasn't metered so far. In this case retrieves the location of
|
||||
// the IP address from the database and creates a corresponding peer event.
|
||||
// Returns the bundle belonging to the given IP and the events occurring during
|
||||
// the initialization.
|
||||
func (pc *peerContainer) bundle(ip string) (*peerBundle, []*peerEvent) {
|
||||
var events []*peerEvent
|
||||
if _, ok := pc.Bundles[ip]; !ok {
|
||||
location := pc.geodb.location(ip)
|
||||
events = append(events, &peerEvent{
|
||||
IP: ip,
|
||||
Location: location,
|
||||
})
|
||||
pc.Bundles[ip] = &peerBundle{
|
||||
Location: location,
|
||||
KnownPeers: make(map[string]*knownPeer),
|
||||
}
|
||||
}
|
||||
return pc.Bundles[ip], events
|
||||
}
|
||||
|
||||
// extendKnown handles the events of the successfully connected peers.
|
||||
// Returns the events occurring during the extension.
|
||||
func (pc *peerContainer) extendKnown(event *peerEvent) []*peerEvent {
|
||||
bundle, events := pc.bundle(event.IP)
|
||||
peer, peerEvents := bundle.knownPeer(event.IP, event.ID)
|
||||
events = append(events, peerEvents...)
|
||||
// Append the connect and the disconnect events to
|
||||
// the corresponding arrays keeping the limit.
|
||||
switch {
|
||||
case event.Connected != nil:
|
||||
peer.Connected = append(peer.Connected, event.Connected)
|
||||
if first := len(peer.Connected) - sampleLimit; first > 0 {
|
||||
peer.Connected = peer.Connected[first:]
|
||||
}
|
||||
peer.Active = true
|
||||
events = append(events, &peerEvent{
|
||||
Activity: Active,
|
||||
IP: peer.ip,
|
||||
ID: peer.id,
|
||||
})
|
||||
pc.activeCount++
|
||||
if peer.listElement != nil {
|
||||
_ = pc.inactivePeers.Remove(peer.listElement)
|
||||
peer.listElement = nil
|
||||
}
|
||||
case event.Disconnected != nil:
|
||||
peer.Disconnected = append(peer.Disconnected, event.Disconnected)
|
||||
if first := len(peer.Disconnected) - sampleLimit; first > 0 {
|
||||
peer.Disconnected = peer.Disconnected[first:]
|
||||
}
|
||||
peer.Active = false
|
||||
events = append(events, &peerEvent{
|
||||
Activity: Inactive,
|
||||
IP: peer.ip,
|
||||
ID: peer.id,
|
||||
})
|
||||
pc.activeCount--
|
||||
if peer.listElement != nil {
|
||||
// If the peer is already in the list, remove and reinsert it.
|
||||
_ = pc.inactivePeers.Remove(peer.listElement)
|
||||
}
|
||||
// Insert the peer into the list.
|
||||
peer.listElement = pc.inactivePeers.PushBack(peer)
|
||||
}
|
||||
for pc.inactivePeers.Len() > 0 && pc.activeCount+pc.inactivePeers.Len() > knownPeerLimit {
|
||||
// While the count of the known peers is greater than the limit,
|
||||
// remove the first element from the inactive peer list and from the map.
|
||||
if removedPeer, ok := pc.inactivePeers.Remove(pc.inactivePeers.Front()).(*knownPeer); ok {
|
||||
events = append(events, pc.removeKnown(removedPeer.ip, removedPeer.id)...)
|
||||
} else {
|
||||
log.Warn("Failed to parse the removed peer")
|
||||
}
|
||||
}
|
||||
if pc.activeCount > knownPeerLimit {
|
||||
log.Warn("Number of active peers is greater than the limit")
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// handleAttempt handles the events of the peers failing before/during the handshake.
|
||||
// Returns the events occurring during the extension.
|
||||
func (pc *peerContainer) handleAttempt(event *peerEvent) []*peerEvent {
|
||||
bundle, events := pc.bundle(event.IP)
|
||||
bundle.Attempts = append(bundle.Attempts, &peerAttempt{
|
||||
Connected: *event.Connected,
|
||||
Disconnected: *event.Disconnected,
|
||||
})
|
||||
pc.attemptOrder = append(pc.attemptOrder, event.IP)
|
||||
for len(pc.attemptOrder) > attemptLimit {
|
||||
// While the length of the connection attempt order array is greater
|
||||
// than the limit, remove the first element from the involved peer's
|
||||
// array and also from the super array.
|
||||
events = append(events, pc.removeAttempt(pc.attemptOrder[0])...)
|
||||
pc.attemptOrder = pc.attemptOrder[1:]
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// peerBundle contains the peers belonging to a given IP address.
|
||||
type peerBundle struct {
|
||||
// Location contains the geographical location based on the bundle's IP address.
|
||||
Location *geoLocation `json:"location,omitempty"`
|
||||
|
||||
// KnownPeers is the inner map of the metered peer
|
||||
// maintainer data structure using the node ID as key.
|
||||
KnownPeers map[string]*knownPeer `json:"knownPeers,omitempty"`
|
||||
|
||||
// Attempts contains the failed connection attempts of the
|
||||
// peers belonging to a given IP address in chronological order.
|
||||
Attempts []*peerAttempt `json:"attempts,omitempty"`
|
||||
}
|
||||
|
||||
// removeKnown removes the known peer belonging to the
|
||||
// given IP address and node ID from the peer tree.
|
||||
func (pc *peerContainer) removeKnown(ip, id string) (events []*peerEvent) {
|
||||
// TODO (kurkomisi): Remove peers that don't have traffic samples anymore.
|
||||
if bundle, ok := pc.Bundles[ip]; ok {
|
||||
if _, ok := bundle.KnownPeers[id]; ok {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveKnown,
|
||||
IP: ip,
|
||||
ID: id,
|
||||
})
|
||||
delete(bundle.KnownPeers, id)
|
||||
} else {
|
||||
log.Warn("No peer to remove", "ip", ip, "id", id)
|
||||
}
|
||||
if len(bundle.KnownPeers) < 1 && len(bundle.Attempts) < 1 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveBundle,
|
||||
IP: ip,
|
||||
})
|
||||
delete(pc.Bundles, ip)
|
||||
}
|
||||
} else {
|
||||
log.Warn("No bundle to remove", "ip", ip)
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// removeAttempt removes the peer attempt belonging to the
|
||||
// given IP address and node ID from the peer tree.
|
||||
func (pc *peerContainer) removeAttempt(ip string) (events []*peerEvent) {
|
||||
if bundle, ok := pc.Bundles[ip]; ok {
|
||||
if len(bundle.Attempts) > 0 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveAttempt,
|
||||
IP: ip,
|
||||
})
|
||||
bundle.Attempts = bundle.Attempts[1:]
|
||||
}
|
||||
if len(bundle.Attempts) < 1 && len(bundle.KnownPeers) < 1 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveBundle,
|
||||
IP: ip,
|
||||
})
|
||||
delete(pc.Bundles, ip)
|
||||
}
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// knownPeer inserts a new peer into the map, if the peer belonging
|
||||
// to the given IP address and node ID wasn't metered so far. Returns the peer
|
||||
// belonging to the given IP and ID as well as the events occurring during the
|
||||
// initialization.
|
||||
func (bundle *peerBundle) knownPeer(ip, id string) (*knownPeer, []*peerEvent) {
|
||||
var events []*peerEvent
|
||||
if _, ok := bundle.KnownPeers[id]; !ok {
|
||||
now := time.Now()
|
||||
ingress := emptyChartEntries(now, sampleLimit)
|
||||
egress := emptyChartEntries(now, sampleLimit)
|
||||
events = append(events, &peerEvent{
|
||||
IP: ip,
|
||||
ID: id,
|
||||
Ingress: append([]*ChartEntry{}, ingress...),
|
||||
Egress: append([]*ChartEntry{}, egress...),
|
||||
})
|
||||
bundle.KnownPeers[id] = &knownPeer{
|
||||
ip: ip,
|
||||
id: id,
|
||||
Ingress: ingress,
|
||||
Egress: egress,
|
||||
}
|
||||
}
|
||||
return bundle.KnownPeers[id], events
|
||||
}
|
||||
|
||||
// knownPeer contains the metered data of a particular peer.
|
||||
type knownPeer struct {
|
||||
// Connected contains the timestamps of the peer's connection events.
|
||||
Connected []*time.Time `json:"connected,omitempty"`
|
||||
|
||||
// Disconnected contains the timestamps of the peer's disconnection events.
|
||||
Disconnected []*time.Time `json:"disconnected,omitempty"`
|
||||
|
||||
// Ingress and Egress contain the peer's traffic samples, which are collected
|
||||
// periodically from the metrics registry.
|
||||
//
|
||||
// A peer can connect multiple times, and we want to visualize the time
|
||||
// passed between two connections, so after the first connection a 0 value
|
||||
// is appended to the traffic arrays even if the peer is inactive until the
|
||||
// peer is removed.
|
||||
Ingress ChartEntries `json:"ingress,omitempty"`
|
||||
Egress ChartEntries `json:"egress,omitempty"`
|
||||
|
||||
Active bool `json:"active"` // Denotes if the peer is still connected.
|
||||
|
||||
listElement *list.Element // Pointer to the peer element in the list.
|
||||
ip, id string // The IP and the ID by which the peer can be accessed in the tree.
|
||||
prevIngress float64
|
||||
prevEgress float64
|
||||
}
|
||||
|
||||
// peerAttempt contains a failed peer connection attempt's attributes.
|
||||
type peerAttempt struct {
|
||||
// Connected contains the timestamp of the connection attempt's moment.
|
||||
Connected time.Time `json:"connected"`
|
||||
|
||||
// Disconnected contains the timestamp of the
|
||||
// moment when the connection attempt failed.
|
||||
Disconnected time.Time `json:"disconnected"`
|
||||
}
|
||||
|
||||
type RemovedPeerType string
|
||||
type ActivityType string
|
||||
|
||||
const (
|
||||
RemoveKnown RemovedPeerType = "known"
|
||||
RemoveAttempt RemovedPeerType = "attempt"
|
||||
RemoveBundle RemovedPeerType = "bundle"
|
||||
|
||||
Active ActivityType = "active"
|
||||
Inactive ActivityType = "inactive"
|
||||
)
|
||||
|
||||
// peerEvent contains the attributes of a peer event.
type peerEvent struct {
	IP           string          `json:"ip,omitempty"`           // IP address of the peer.
	ID           string          `json:"id,omitempty"`           // Node ID of the peer.
	Remove       RemovedPeerType `json:"remove,omitempty"`       // Type of the peer that is to be removed.
	Location     *geoLocation    `json:"location,omitempty"`     // Geographical location of the peer.
	Connected    *time.Time      `json:"connected,omitempty"`    // Timestamp of the connection moment.
	Disconnected *time.Time      `json:"disconnected,omitempty"` // Timestamp of the disconnection moment.
	Ingress      ChartEntries    `json:"ingress,omitempty"`      // Ingress samples.
	Egress       ChartEntries    `json:"egress,omitempty"`       // Egress samples.
	Activity     ActivityType    `json:"activity,omitempty"`     // Connection status change.
}
|
||||
|
||||
// trafficMap is a container for the periodically collected peer traffic,
// keyed first by the peer's IP address, then by its node ID.
type trafficMap map[string]map[string]float64
|
||||
|
||||
// insert inserts a new value to the traffic map. Overwrites
|
||||
// the value at the given ip and id if that already exists.
|
||||
func (m *trafficMap) insert(ip, id string, val float64) {
|
||||
if _, ok := (*m)[ip]; !ok {
|
||||
(*m)[ip] = make(map[string]float64)
|
||||
}
|
||||
(*m)[ip][id] = val
|
||||
}
|
||||
|
||||
// collectPeerData gathers data about the peers and sends it to the clients.
// It runs as a long-lived goroutine: it subscribes to metered peer events,
// buffers them between ticks, and on every refresh tick folds the buffered
// events plus the freshly collected traffic counters into the peer tree,
// broadcasting the resulting diff to all connected dashboard clients.
func (db *Dashboard) collectPeerData() {
	defer db.wg.Done()

	// Open the geodb database for IP to geographical information conversions.
	var err error
	db.geodb, err = openGeoDB()
	if err != nil {
		log.Warn("Failed to open geodb", "err", err)
		return
	}
	defer db.geodb.close()

	peerCh := make(chan p2p.MeteredPeerEvent, eventBufferLimit) // Peer event channel.
	subPeer := p2p.SubscribeMeteredPeerEvent(peerCh)            // Subscribe to peer events.
	defer subPeer.Unsubscribe()                                 // Unsubscribe at the end.

	ticker := time.NewTicker(db.config.Refresh)
	defer ticker.Stop()

	type registryFunc func(name string, i interface{})
	type collectorFunc func(traffic *trafficMap) registryFunc

	// trafficCollector generates a function that can be passed to
	// the prefixed peer registry in order to collect the metered
	// traffic data from each peer meter.
	trafficCollector := func(prefix string) collectorFunc {
		// This part makes it possible to collect the
		// traffic data into a map from outside.
		return func(traffic *trafficMap) registryFunc {
			// The function which can be passed to the registry.
			return func(name string, i interface{}) {
				if m, ok := i.(metrics.Meter); ok {
					// The name of the meter has the format: <common traffic prefix><IP>/<ID>
					if k := strings.Split(strings.TrimPrefix(name, prefix), "/"); len(k) == 2 {
						traffic.insert(k[0], k[1], float64(m.Count()))
					} else {
						log.Warn("Invalid meter name", "name", name, "prefix", prefix)
					}
				} else {
					log.Warn("Invalid meter type", "name", name)
				}
			}
		}
	}
	collectIngress := trafficCollector(p2p.MetricsInboundTraffic + "/")
	collectEgress := trafficCollector(p2p.MetricsOutboundTraffic + "/")

	// Install the (initially empty) peer container into the history,
	// guarded by peerLock since clients may read the history concurrently.
	peers := newPeerContainer(db.geodb)
	db.peerLock.Lock()
	db.history.Network = &NetworkMessage{
		Peers: peers,
	}
	db.peerLock.Unlock()

	// newPeerEvents contains peer events, which trigger operations that
	// will be executed on the peer tree after a metering period.
	newPeerEvents := make([]*peerEvent, 0, eventLimit)
	ingress, egress := new(trafficMap), new(trafficMap)
	*ingress, *egress = make(trafficMap), make(trafficMap)

	for {
		select {
		case event := <-peerCh:
			// Buffer the raw p2p event; the heavy processing is deferred
			// to the next ticker tick so it happens once per period.
			now := time.Now()
			switch event.Type {
			case p2p.PeerConnected:
				// Elapsed is measured up to 'now', so the actual connection
				// moment is reconstructed by subtracting it.
				connected := now.Add(-event.Elapsed)
				newPeerEvents = append(newPeerEvents, &peerEvent{
					IP:        event.IP.String(),
					ID:        event.ID.String(),
					Connected: &connected,
				})
			case p2p.PeerDisconnected:
				ip, id := event.IP.String(), event.ID.String()
				newPeerEvents = append(newPeerEvents, &peerEvent{
					IP:           ip,
					ID:           id,
					Disconnected: &now,
				})
				// The disconnect event comes with the last metered traffic count,
				// because after the disconnection the peer's meter is removed
				// from the registry. It can happen that between two metering
				// periods the same peer disconnects multiple times, and appending
				// all the samples to the traffic arrays would shift the metering,
				// so only the last metering is stored, overwriting the previous one.
				ingress.insert(ip, id, float64(event.Ingress))
				egress.insert(ip, id, float64(event.Egress))
			case p2p.PeerHandshakeFailed:
				// Handshake failures have no node ID, only an IP; they are
				// recorded as a connection attempt spanning Elapsed time.
				connected := now.Add(-event.Elapsed)
				newPeerEvents = append(newPeerEvents, &peerEvent{
					IP:           event.IP.String(),
					Connected:    &connected,
					Disconnected: &now,
				})
			default:
				log.Error("Unknown metered peer event type", "type", event.Type)
			}
		case <-ticker.C:
			// Collect the traffic samples from the registry.
			p2p.PeerIngressRegistry.Each(collectIngress(ingress))
			p2p.PeerEgressRegistry.Each(collectEgress(egress))

			// Protect 'peers', because it is part of the history.
			db.peerLock.Lock()

			var diff []*peerEvent
			for i := 0; i < len(newPeerEvents); i++ {
				if newPeerEvents[i].IP == "" {
					log.Warn("Peer event without IP", "event", *newPeerEvents[i])
					continue
				}
				diff = append(diff, newPeerEvents[i])
				// There are two main branches of peer events coming from the event
				// feed, one belongs to the known peers, one to the unknown peers.
				// If the event has node ID, it belongs to a known peer, otherwise
				// to an unknown one, which is considered as connection attempt.
				//
				// The extension can produce additional peer events, such
				// as remove, location and initial samples events.
				if newPeerEvents[i].ID == "" {
					diff = append(diff, peers.handleAttempt(newPeerEvents[i])...)
					continue
				}
				diff = append(diff, peers.extendKnown(newPeerEvents[i])...)
			}
			// Update the peer tree using the traffic maps.
			for ip, bundle := range peers.Bundles {
				for id, peer := range bundle.KnownPeers {
					// Value is 0 if the traffic map doesn't have the
					// entry corresponding to the given IP and ID.
					curIngress, curEgress := (*ingress)[ip][id], (*egress)[ip][id]
					// The meters are cumulative, so the per-period sample is the
					// delta against the previous totals. If the current value is
					// below the previous one (e.g. the meter was reset after a
					// reconnect), the raw value is used as the sample instead.
					deltaIngress, deltaEgress := curIngress, curEgress
					if deltaIngress >= peer.prevIngress {
						deltaIngress -= peer.prevIngress
					}
					if deltaEgress >= peer.prevEgress {
						deltaEgress -= peer.prevEgress
					}
					peer.prevIngress, peer.prevEgress = curIngress, curEgress
					i := &ChartEntry{
						Value: deltaIngress,
					}
					e := &ChartEntry{
						Value: deltaEgress,
					}
					peer.Ingress = append(peer.Ingress, i)
					peer.Egress = append(peer.Egress, e)
					// Trim the sample arrays to the last sampleLimit entries.
					if first := len(peer.Ingress) - sampleLimit; first > 0 {
						peer.Ingress = peer.Ingress[first:]
					}
					if first := len(peer.Egress) - sampleLimit; first > 0 {
						peer.Egress = peer.Egress[first:]
					}
					// Creating the traffic sample events.
					diff = append(diff, &peerEvent{
						IP:      ip,
						ID:      id,
						Ingress: ChartEntries{i},
						Egress:  ChartEntries{e},
					})
				}
			}
			db.peerLock.Unlock()

			if len(diff) > 0 {
				db.sendToAll(&Message{Network: &NetworkMessage{
					Diff: diff,
				}})
			}
			// Clear the traffic maps, and the event array,
			// prepare them for the next metering.
			*ingress, *egress = make(trafficMap), make(trafficMap)
			newPeerEvents = newPeerEvents[:0]
		case err := <-subPeer.Err():
			log.Warn("Peer subscription error", "err", err)
			return
		case errc := <-db.quit:
			errc <- nil
			return
		}
	}
}
|
|
@ -0,0 +1,146 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/gosigar"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
// meterCollector returns a function, which retrieves the count of a specific meter.
|
||||
func meterCollector(name string) func() int64 {
|
||||
if meter := metrics.Get(name); meter != nil {
|
||||
m := meter.(metrics.Meter)
|
||||
return func() int64 {
|
||||
return m.Count()
|
||||
}
|
||||
}
|
||||
return func() int64 {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// collectSystemData gathers data about the system and sends it to the clients.
// It runs as a long-lived goroutine: every refresh interval it samples the
// cumulative network/disk/CPU/memory counters, converts them into per-period
// rate samples, appends those to the rolling history and broadcasts them.
func (db *Dashboard) collectSystemData() {
	defer db.wg.Done()

	systemCPUUsage := gosigar.Cpu{}
	systemCPUUsage.Get()
	var (
		mem runtime.MemStats

		// Collectors for the cumulative meters; each returns the current total.
		collectNetworkIngress = meterCollector(p2p.MetricsInboundTraffic)
		collectNetworkEgress  = meterCollector(p2p.MetricsOutboundTraffic)
		collectDiskRead       = meterCollector("eth/db/chaindata/disk/read")
		collectDiskWrite      = meterCollector("eth/db/chaindata/disk/write")

		// Previous totals, used to compute per-period deltas below.
		prevNetworkIngress = collectNetworkIngress()
		prevNetworkEgress  = collectNetworkEgress()
		prevProcessCPUTime = getProcessCPUTime()
		prevSystemCPUUsage = systemCPUUsage
		prevDiskRead       = collectDiskRead()
		prevDiskWrite      = collectDiskWrite()

		// frequency is the refresh interval in seconds, used to turn deltas
		// into per-second rates. NOTE(review): integer division — assumes
		// Refresh is a whole multiple of time.Second; sub-second remainders
		// are truncated. TODO confirm against the config's validation.
		frequency = float64(db.config.Refresh / time.Second)
		numCPU    = float64(runtime.NumCPU())
	)

	for {
		select {
		case errc := <-db.quit:
			errc <- nil
			return
		case <-time.After(db.config.Refresh):
			// Sample all the cumulative counters for this period.
			systemCPUUsage.Get()
			var (
				curNetworkIngress = collectNetworkIngress()
				curNetworkEgress  = collectNetworkEgress()
				curProcessCPUTime = getProcessCPUTime()
				curSystemCPUUsage = systemCPUUsage
				curDiskRead       = collectDiskRead()
				curDiskWrite      = collectDiskWrite()

				// Per-period deltas against the previous sample.
				deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
				deltaNetworkEgress  = float64(curNetworkEgress - prevNetworkEgress)
				deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
				deltaSystemCPUUsage = curSystemCPUUsage.Delta(prevSystemCPUUsage)
				deltaDiskRead       = curDiskRead - prevDiskRead
				deltaDiskWrite      = curDiskWrite - prevDiskWrite
			)
			// Roll the previous totals forward for the next period.
			prevNetworkIngress = curNetworkIngress
			prevNetworkEgress = curNetworkEgress
			prevProcessCPUTime = curProcessCPUTime
			prevSystemCPUUsage = curSystemCPUUsage
			prevDiskRead = curDiskRead
			prevDiskWrite = curDiskWrite

			// Build the chart entries; deltas are normalized to per-second rates.
			runtime.ReadMemStats(&mem)
			activeMemory := &ChartEntry{
				Value: float64(mem.Alloc) / frequency,
			}
			virtualMemory := &ChartEntry{
				Value: float64(mem.Sys) / frequency,
			}
			networkIngress := &ChartEntry{
				Value: deltaNetworkIngress / frequency,
			}
			networkEgress := &ChartEntry{
				Value: deltaNetworkEgress / frequency,
			}
			processCPU := &ChartEntry{
				Value: deltaProcessCPUTime / frequency / numCPU * 100,
			}
			systemCPU := &ChartEntry{
				Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
			}
			diskRead := &ChartEntry{
				Value: float64(deltaDiskRead) / frequency,
			}
			diskWrite := &ChartEntry{
				Value: float64(deltaDiskWrite) / frequency,
			}
			// Shift each history window by one and append the new sample,
			// guarded by sysLock since clients may read the history concurrently.
			db.sysLock.Lock()
			sys := db.history.System
			sys.ActiveMemory = append(sys.ActiveMemory[1:], activeMemory)
			sys.VirtualMemory = append(sys.VirtualMemory[1:], virtualMemory)
			sys.NetworkIngress = append(sys.NetworkIngress[1:], networkIngress)
			sys.NetworkEgress = append(sys.NetworkEgress[1:], networkEgress)
			sys.ProcessCPU = append(sys.ProcessCPU[1:], processCPU)
			sys.SystemCPU = append(sys.SystemCPU[1:], systemCPU)
			sys.DiskRead = append(sys.DiskRead[1:], diskRead)
			sys.DiskWrite = append(sys.DiskWrite[1:], diskWrite)
			db.sysLock.Unlock()

			// Broadcast only the newest sample of each series to the clients.
			db.sendToAll(&Message{
				System: &SystemMessage{
					ActiveMemory:   ChartEntries{activeMemory},
					VirtualMemory:  ChartEntries{virtualMemory},
					NetworkIngress: ChartEntries{networkIngress},
					NetworkEgress:  ChartEntries{networkEgress},
					ProcessCPU:     ChartEntries{processCPU},
					SystemCPU:      ChartEntries{systemCPU},
					DiskRead:       ChartEntries{diskRead},
					DiskWrite:      ChartEntries{diskWrite},
				},
			})
		}
	}
}
|
|
@ -161,6 +161,7 @@ func (c *meteredConn) Write(b []byte) (n int, err error) {
|
|||
// the ingress and the egress traffic registries using the peer's IP and node ID,
|
||||
// also emits connect event.
|
||||
func (c *meteredConn) handshakeDone(id enode.ID) {
|
||||
// TODO (kurkomisi): use the node URL instead of the pure node ID. (the String() method of *Node)
|
||||
if atomic.AddInt32(&meteredPeerCount, 1) >= MeteredPeerLimit {
|
||||
// Don't register the peer in the traffic registries.
|
||||
atomic.AddInt32(&meteredPeerCount, -1)
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
# This is the official list of freegeoip authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS file.
|
||||
#
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Alexandre Fiori <fiorix@gmail.com>
|
|
@ -0,0 +1,22 @@
|
|||
# This is the official list of freegeoip contributors for copyright purposes.
|
||||
# This file is distinct from the AUTHORS file.
|
||||
#
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
#
|
||||
# Use the following command to generate the list:
|
||||
#
|
||||
# git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Alex Goretoy <alex@goretoy.com>
|
||||
Gleicon Moraes <gleicon@gmail.com>
|
||||
Leandro Pereira <leandro@hardinfo.org>
|
||||
Lucas Fontes <lxfontes@gmail.com>
|
||||
Matthias Nehlsen <matthias.nehlsen@gmail.com>
|
||||
Melchi <melchi.si@gmail.com>
|
||||
Nick Muerdter <stuff@nickm.org>
|
||||
Vladimir Agafonkin <agafonkin@gmail.com>
|
|
@ -0,0 +1,25 @@
|
|||
FROM golang:1.9
|
||||
|
||||
COPY cmd/freegeoip/public /var/www
|
||||
|
||||
ADD . /go/src/github.com/apilayer/freegeoip
|
||||
RUN \
|
||||
cd /go/src/github.com/apilayer/freegeoip/cmd/freegeoip && \
|
||||
go get -d && go install && \
|
||||
apt-get update && apt-get install -y libcap2-bin && \
|
||||
setcap cap_net_bind_service=+ep /go/bin/freegeoip && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/* && \
|
||||
useradd -ms /bin/bash freegeoip
|
||||
|
||||
USER freegeoip
|
||||
ENTRYPOINT ["/go/bin/freegeoip"]
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
# CMD instructions:
|
||||
# Add "-use-x-forwarded-for" if your server is behind a reverse proxy
|
||||
# Add "-public", "/var/www" to enable the web front-end
|
||||
# Add "-internal-server", "8888" to enable the pprof+metrics server
|
||||
#
|
||||
# Example:
|
||||
# CMD ["-use-x-forwarded-for", "-public", "/var/www", "-internal-server", "8888"]
|
|
@ -0,0 +1,55 @@
|
|||
# History of freegeoip.net
|
||||
|
||||
The freegeoip software is the result of a web server research project that
|
||||
started in 2009, written in Python and hosted on
|
||||
[Google App Engine](http://appengine.google.com). It was rapidly adopted by
|
||||
many developers around the world due to its simplistic and straightforward
|
||||
HTTP API, causing the free account on GAE to exceed its quota every day
|
||||
after few hours of operation.
|
||||
|
||||
A year later freegeoip 1.0 was released, and the freegeoip.net domain
|
||||
moved over to its own server infrastructure. The software was rewritten
|
||||
using the [Cyclone](http://cyclone.io) web framework, backed by
|
||||
[Twisted](http://twistedmatrix.com) and [PyPy](http://pypy.org) in
|
||||
production. That's when the first database management tool was created,
|
||||
a script that would download many pieces of information from the Internet
|
||||
to create the IP database, an sqlite flat file used by the server.
|
||||
|
||||
This version of the Python server shipped with a much better front-end as
|
||||
well, but still as a server-side rendered template inherited from the GAE
|
||||
version. It was only circa 2011 that freegeoip got its first standalone
|
||||
front-end based on jQuery, and is when Twitter bootstrap was first used.
|
||||
|
||||
Python played an important role in the early life of freegeoip and
|
||||
allowed the service to grow and evolve fast. It provided a lot of
|
||||
flexibility in building and maintaining the IP database using multiple
|
||||
sources of data. This version of the server lasted until 2013, when
|
||||
it was once again rewritten from scratch, this time in Go. The database
|
||||
tool, however, remained intact.
|
||||
|
||||
In 2013 the Go version was released as freegeoip 2.0 and this version
|
||||
had many iterations. The first versions of the server written in Go were
|
||||
very rustic, practically a verbatim transcription of the Python server.
|
||||
It took a while until it started looking more like common Go code, and to
|
||||
have tests.
|
||||
|
||||
Another important change that shipped with v2 was a front-end based on
|
||||
AngularJS, but still mixed with some jQuery. The Google map in the front
|
||||
page was made optional to put more focus on the HTTP API. The popularity
|
||||
of freegeoip has increased considerably over the years of 2013 and 2014,
|
||||
calling for more.
|
||||
|
||||
Enter freegeoip 3.0, an evolution of the Go server. The foundation of
|
||||
freegeoip, which is the IP database and HTTP API, now lives in a Go
|
||||
package that other developers can leverage. The freegeoip web server is
|
||||
built on this package making its code cleaner, the server faster,
|
||||
and requires zero maintenance for the IP database. The server downloads
|
||||
the file from MaxMind and keeps it up to date in the background.
|
||||
|
||||
This and other changes make it very Docker friendly.
|
||||
|
||||
The front-end has been trimmed down to a single index.html file that loads
|
||||
CSS and JS from CDNs on the internet. The JS part is based on AngularJS
|
||||
and handles the search request and response of the public site. The
|
||||
optional map has become a link to Google Maps following the lat/long
|
||||
of the query results.
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The freegeoip authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* The names of authors or contributors may NOT be used to endorse or
|
||||
promote products derived from this software without specific prior
|
||||
written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1 @@
|
|||
web: freegeoip -http :${PORT} -use-x-forwarded-for -public /app/cmd/freegeoip/public -quota-backend map -quota-max 10000
|
|
@ -0,0 +1,259 @@
|
|||
![freegeoip ipstack](https://raw.githubusercontent.com/apilayer/freegeoip/master/freegeo-warning.png)
|
||||
|
||||
# freegeoip - Important Announcement
|
||||
|
||||
*[The old freegeoip API is now deprecated and will be discontinued on July 1st, 2018]*
|
||||
|
||||
Launched more than 6 years ago, the freegeoip.net API has grown into one of the biggest and most widely used APIs for IP to location services worldwide. The API is used by thousands of developers, SMBs and large corporations around the globe and is currently handling more than 2 billion requests per day. After years of operation and the API remaining almost unchanged, today we announce the complete re-launch of freegeoip into a faster, more advanced and more scalable API service called ipstack (https://ipstack.com). All users that wish to continue using our IP to location service will be required to sign up to obtain a free API access key and perform a few simple changes to their integration. While the new API offers the ability to return data in the same structure as the old freegeoip API, the new API structure offers various options of delivering much more advanced data for IP Addresses.
|
||||
|
||||
## Required Changes to Legacy Integrations (freegeoip.net/json/xml)
|
||||
|
||||
As of March 31 2018 the old freegeoip API is deprecated and a completely re-designed API is now accessible at http://api.ipstack.com. While the new API offers the same capabilities as the old one and also has the option of returning data in the legacy format, the API URL has now changed and all users are required to sign up for a free API Access Key to use the service.
|
||||
|
||||
1. Get a free ipstack Account and Access Key
|
||||
|
||||
Head over to https://ipstack.com and follow the instructions to create your account and obtain your access token. If you only need basic IP to Geolocation data and do not require more than 10,000 requests per month, you can use the free account. If you'd like more advanced features or more requests than included in the free account you will need to choose one of the paid options. You can find an overview of all available plans at https://ipstack.com/product
|
||||
|
||||
2. Integrate the new API URL
|
||||
|
||||
The new API comes with a completely new endpoint (api.ipstack.com) and requires you to append your API Access Key to the URL as a GET parameter. For complete integration instructions, please head over to the API Documentation at https://ipstack.com/documentation. While the new API offers a completely reworked response structure with many additional data points, we also offer the option to receive results in the old freegeoip.net format in JSON or XML.
|
||||
|
||||
To receive your API results in the old freegeoip format, please simply append &legacy=1 to the new API URL.
|
||||
|
||||
JSON Example: http://api.ipstack.com/186.116.207.169?access_key=YOUR_ACCESS_KEY&output=json&legacy=1
|
||||
|
||||
XML Example: http://api.ipstack.com/186.116.207.169?access_key=YOUR_ACCESS_KEY&output=xml&legacy=1
|
||||
|
||||
## New features with ipstack
|
||||
While the new ipstack service now runs on a commercial/freemium model, we have worked hard at building a faster, more scalable, and more advanced IP to location API product. You can read more about all the new features by navigating to https://ipstack.com, but here's a list of the most important changes and additions:
|
||||
|
||||
- We're still free for basic usage
|
||||
|
||||
While we now offer paid / premium options for our more advanced users, our core product and IP to Country/Region/City product is still completely free of charge for up to 10,000 requests per month. If you need more advanced data or more requests, you can choose one of the paid plans listed at https://ipstack.com/product
|
||||
|
||||
- Batch Requests
|
||||
|
||||
Need to validate more than 1 IP Address in a single API Call? Our new Bulk Lookup Feature (available on our paid plans) allows you to geolocate up to 50 IP Addresses in a single API Call.
|
||||
|
||||
- Much more Data
|
||||
|
||||
While the old freegeoip API was limited to provide only the most basic IP to location data, our new API provides more than 20 additional data points including Language, Time Zone, Current Time, Currencies, Connection & ASN Information, and much more. To learn more about all the data points available, please head over to the ipstack website.
|
||||
|
||||
- Security & Fraud Prevention Tools
|
||||
|
||||
Do you want to prevent fraudulent traffic from arriving at your website or from abusing your service? Easily spot malicious / proxy / VPN traffic by using our new Security Module, which outputs a lot of valuable security information about an IP Address.
|
||||
|
||||
Next Steps
|
||||
|
||||
- Deprecation of the old API
|
||||
|
||||
While we want to keep the disruption to our current users as minimal as possible, we are planning to shut the old API down on July 1st, 2018. This should give all users enough time to adapt to changes, and should we still see high volumes of traffic going to the old API by that date, we may decide to extend it further. In any case, we highly recommend you switch to the new API as soon as possible. We will keep you posted here about any changes to the planned shutdown date.
|
||||
|
||||
- Any Questions? Please get in touch!
|
||||
|
||||
It's very important to ensure a smooth transition to ipstack for all freegeoip API users. If you are a developer that has published a plugin/addon that includes the legacy API, we recommend you get in touch with us and also share this announcement with your users. If you have any questions about the transition or the new API, please get in touch with us at support@ipstack.com
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# freegeoip - Deprecated Documentation
|
||||
|
||||
[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy)
|
||||
|
||||
This is the source code of the freegeoip software. It contains both the web server that empowers freegeoip.net, and a package for the [Go](http://golang.org) programming language that enables any web server to support IP geolocation with a simple and clean API.
|
||||
|
||||
See http://en.wikipedia.org/wiki/Geolocation for details about geolocation.
|
||||
|
||||
Developers looking for the Go API can skip to the [Package freegeoip](#packagefreegeoip) section below.
|
||||
|
||||
## Running
|
||||
|
||||
This section is for people who desire to run the freegeoip web server on their own infrastructure. The easiest and most generic way of doing this is by using Docker. All examples below use Docker.
|
||||
|
||||
### Docker
|
||||
|
||||
#### Install Docker
|
||||
|
||||
Docker has [install instructions for many platforms](https://docs.docker.com/engine/installation/),
|
||||
including
|
||||
- [Ubuntu](https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/)
|
||||
- [CentOS](https://docs.docker.com/engine/installation/linux/docker-ce/centos/)
|
||||
- [Mac](https://docs.docker.com/docker-for-mac/install/)
|
||||
|
||||
#### Run the API in a container
|
||||
|
||||
```bash
|
||||
docker run --restart=always -p 8080:8080 -d apilayer/freegeoip
|
||||
```
|
||||
|
||||
#### Test
|
||||
|
||||
```bash
|
||||
curl localhost:8080/json/1.2.3.4
|
||||
# => {"ip":"1.2.3.4","country_code":"US","country_name":"United States", # ...
|
||||
```
|
||||
|
||||
### Other Linux, OS X, FreeBSD, and Windows
|
||||
|
||||
There are [pre-compiled binaries](https://github.com/apilayer/freegeoip/releases) available.
|
||||
|
||||
### Production configuration
|
||||
|
||||
For production workloads you may want to use different configuration for the freegeoip web server, for example:
|
||||
|
||||
* Enabling the "internal server" for collecting metrics and profiling/tracing the freegeoip web server on demand
|
||||
* Monitoring the internal server using [Prometheus](https://prometheus.io), or exporting your metrics to [New Relic](https://newrelic.com)
|
||||
* Serving the freegeoip API over HTTPS (TLS) using your own certificates, or provisioned automatically using [LetsEncrypt.org](https://letsencrypt.org)
|
||||
* Configuring [HSTS](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) to restrict your browser clients to always use HTTPS
|
||||
* Configuring the read and write timeouts to avoid stale clients consuming server resources
|
||||
* Configuring the freegeoip web server to read the client IP (for logs, etc) from the X-Forwarded-For header when running behind a reverse proxy
|
||||
* Configuring [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) to restrict access to your API to specific domains
|
||||
* Configuring a specific endpoint path prefix other than the default "/" (thus /json, /xml, /csv) to serve the API alongside other APIs on the same host
|
||||
* Optimizing your round trips by enabling [TCP Fast Open](https://en.wikipedia.org/wiki/TCP_Fast_Open) on your OS and the freegeoip web server
|
||||
* Setting up usage limits (quotas) for your clients (per client IP) based on requests per time interval; we support various backends such as in-memory map (for single instance), or redis or memcache for distributed deployments
|
||||
* Serve the default [GeoLite2 City](http://dev.maxmind.com/geoip/geoip2/geolite2/) free database that is downloaded and updated automatically in background on a configurable schedule, or
|
||||
* Serve the commercial [GeoIP2 City](https://www.maxmind.com/en/geoip2-city) database from MaxMind, either as a local file that you provide and update periodically (so the server can reload it), or configured to be downloaded periodically using your API key
|
||||
|
||||
See the [Server Options](#serveroptions) section below for more information on configuring the server.
|
||||
|
||||
For automation, check out the [freegeoip chef cookbook](https://supermarket.chef.io/cookbooks/freegeoip) or the (legacy) [Ansible Playbook](./cmd/freegeoip/ansible-playbook) for Ubuntu 14.04 LTS.
|
||||
|
||||
<a name="serveroptions">
|
||||
|
||||
### Server Options
|
||||
|
||||
To see all the available options, use the `-help` option:
|
||||
|
||||
```bash
|
||||
docker run --rm -it apilayer/freegeoip -help
|
||||
```
|
||||
|
||||
If you're using LetsEncrypt.org to provision your TLS certificates, you have to listen for HTTPS on port 443. Following is an example of the server listening on 3 different ports: metrics + pprof (8888), http (80), and https (443):
|
||||
|
||||
```bash
|
||||
docker run -p 8888:8888 -p 80:8080 -p 443:8443 -d apilayer/freegeoip \
|
||||
-internal-server=:8888 \
|
||||
-http=:8080 \
|
||||
-https=:8443 \
|
||||
-hsts=max-age=31536000 \
|
||||
-letsencrypt \
|
||||
-letsencrypt-hosts=myfancydomain.io
|
||||
```
|
||||
|
||||
You can configure the freegeoip web server via command line flags or environment variables. The names of environment variables are the same as the command line flags, but prefixed with FREEGEOIP, all uppercase, separated by underscores. If you want to use environment variables instead:
|
||||
|
||||
```bash
|
||||
$ cat prod.env
|
||||
FREEGEOIP_INTERNAL_SERVER=:8888
|
||||
FREEGEOIP_HTTP=:8080
|
||||
FREEGEOIP_HTTPS=:8443
|
||||
FREEGEOIP_HSTS=max-age=31536000
|
||||
FREEGEOIP_LETSENCRYPT=true
|
||||
FREEGEOIP_LETSENCRYPT_HOSTS=myfancydomain.io
|
||||
|
||||
$ docker run --env-file=prod.env -p 8888:8888 -p 80:8080 -p 443:8443 -d apilayer/freegeoip
|
||||
```
|
||||
|
||||
By default, HTTP/2 is enabled over HTTPS. You can disable by passing the `-http2=false` flag.
|
||||
|
||||
Also, the Docker image of freegeoip does not provide the web page from freegeoip.net, it only provides the API. If you want to serve that page, you can pass the `-public=/var/www` parameter in the command line. You can also tell Docker to mount that directory as a volume on the host machine and have it serve your own page, using Docker's `-v` parameter.
|
||||
|
||||
If the freegeoip web server is running behind a reverse proxy or load balancer, you have to run it passing the `-use-x-forwarded-for` parameter and provide the `X-Forwarded-For` HTTP header in all requests. This is for the freegeoip web server be able to log the client IP, and to perform geolocation lookups when an IP is not provided to the API, e.g. `/json/` (uses client IP) vs `/json/1.2.3.4`.
|
||||
|
||||
## Database
|
||||
|
||||
The current implementation uses the free [GeoLite2 City](http://dev.maxmind.com/geoip/geoip2/geolite2/) database from MaxMind.
|
||||
|
||||
In the past we had databases from other providers, and at some point even our own database comprised of data from different sources. This means it might change in the future.
|
||||
|
||||
If you have purchased the commercial database from MaxMind, you can point the freegeoip web server or (Go API, for dev) to the URL containing the file, or local file, and the server will use it.
|
||||
|
||||
In case of files on disk, you can replace the file with a newer version and the freegeoip web server will reload it automatically in background. If instead of a file you use a URL (the default), we periodically check the URL in background to see if there's a new database version available, then download and reload it automatically.
|
||||
|
||||
All responses from the freegeoip API contain the date that the database was downloaded in the X-Database-Date HTTP header.
|
||||
|
||||
## API
|
||||
|
||||
The freegeoip API is served by endpoints that encode the response in different formats.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl freegeoip.net/json/
|
||||
```
|
||||
|
||||
Returns the geolocation information of your own IP address, the source IP address of the connection.
|
||||
|
||||
You can pass a different IP or hostname. For example, to lookup the geolocation of `github.com` the server resolves the name first, then uses the first IP address available, which might be IPv4 or IPv6:
|
||||
|
||||
```bash
|
||||
curl freegeoip.net/json/github.com
|
||||
```
|
||||
|
||||
Same semantics are available for the `/xml/{ip}` and `/csv/{ip}` endpoints.
|
||||
|
||||
JSON responses can be encoded as JSONP, by adding the `callback` parameter:
|
||||
|
||||
```bash
|
||||
curl freegeoip.net/json/?callback=foobar
|
||||
```
|
||||
|
||||
The callback parameter is ignored on all other endpoints.
|
||||
|
||||
## Metrics and profiling
|
||||
|
||||
The freegeoip web server can provide metrics about its usage, and also supports runtime profiling and tracing.
|
||||
|
||||
Both are disabled by default, but can be enabled by passing the `-internal-server` parameter in the command line. Metrics are generated for [Prometheus](http://prometheus.io) and can be queried at `/metrics` even with curl.
|
||||
|
||||
HTTP pprof is available at `/debug/pprof` and the examples from the [pprof](https://golang.org/pkg/net/http/pprof/) package documentation should work on the freegeoip web server.
|
||||
|
||||
<a name="packagefreegeoip">
|
||||
|
||||
## Package freegeoip
|
||||
|
||||
The freegeoip package for the Go programming language provides two APIs:
|
||||
|
||||
- A database API that requires zero maintenance of the IP database;
|
||||
- A geolocation `http.Handler` that can be used/served by any http server.
|
||||
|
||||
tl;dr if all you want is code then see the `example_test.go` file.
|
||||
|
||||
Otherwise check out the godoc reference.
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/apilayer/freegeoip?status.svg)](https://godoc.org/github.com/apilayer/freegeoip)
|
||||
[![Build Status](https://secure.travis-ci.org/apilayer/freegeoip.png)](http://travis-ci.org/apilayer/freegeoip)
|
||||
[![GoReportCard](https://goreportcard.com/badge/github.com/apilayer/freegeoip)](https://goreportcard.com/report/github.com/apilayer/freegeoip)
|
||||
|
||||
### Features
|
||||
|
||||
- Zero maintenance
|
||||
|
||||
The DB object alone can download an IP database file from the internet and service lookups to your program right away. It will auto-update the file in background and always magically work.
|
||||
|
||||
- DevOps friendly
|
||||
|
||||
If you do care about the database and have the commercial version of the MaxMind database, you can update the database file with your program running and the DB object will load it in background. You can focus on your stuff.
|
||||
|
||||
- Extensible
|
||||
|
||||
Besides the database part, the package provides an `http.Handler` object that you can add to your HTTP server to service IP geolocation lookups with the same simplistic API of freegeoip.net. There's also an interface for crafting your own HTTP responses encoded in any format.
|
||||
|
||||
### Install
|
||||
|
||||
Download the package:
|
||||
|
||||
go get -d github.com/apilayer/freegeoip/...
|
||||
|
||||
Install the web server:
|
||||
|
||||
go install github.com/apilayer/freegeoip/cmd/freegeoip
|
||||
|
||||
Test coverage is quite good, and test code may help you find the stuff you need.
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"name": "freegeoip",
|
||||
"description": "IP geolocation web server",
|
||||
"website": "https://github.com/apilayer/freegeoip",
|
||||
"success_url": "/",
|
||||
"keywords": ["golang", "geoip", "api"]
|
||||
}
|
|
@ -0,0 +1,453 @@
|
|||
// Copyright 2009 The freegeoip authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package freegeoip
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/howeyc/fsnotify"
|
||||
"github.com/oschwald/maxminddb-golang"
|
||||
)
|
||||
|
||||
var (
	// ErrUnavailable may be returned by DB.Lookup when the database
	// points to a URL and is not yet available because it's being
	// downloaded in background.
	ErrUnavailable = errors.New("no database available")

	// defaultDB is the local cached copy of a database downloaded
	// from a URL, kept under the system temp directory.
	defaultDB = filepath.Join(os.TempDir(), "freegeoip", "db.gz")

	// MaxMindDB is the URL of the free MaxMind GeoLite2 database.
	MaxMindDB = "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz"
)
|
||||
|
||||
// DB is the IP geolocation database.
//
// It wraps a maxminddb reader plus the bookkeeping needed to reload
// the file when it changes on disk, and to download and refresh it in
// background when the database points to a URL.
type DB struct {
	file        string            // Database file name.
	checksum    string            // MD5 of the unzipped database file.
	reader      *maxminddb.Reader // Actual db object.
	notifyQuit  chan struct{}     // Stop auto-update and watch goroutines.
	notifyOpen  chan string       // Notify when a db file is open.
	notifyError chan error        // Notify when an error occurs.
	notifyInfo  chan string       // Notify random actions for logging.
	closed      bool              // Mark this db as closed.
	lastUpdated time.Time         // Last time the db was updated.
	mu          sync.RWMutex      // Protects all the above.

	updateInterval   time.Duration // Update interval.
	maxRetryInterval time.Duration // Max retry interval in case of failure.
}
|
||||
|
||||
// Open creates and initializes a DB from a local file.
|
||||
//
|
||||
// The database file is monitored by fsnotify and automatically
|
||||
// reloads when the file is updated or overwritten.
|
||||
func Open(dsn string) (*DB, error) {
|
||||
db := &DB{
|
||||
file: dsn,
|
||||
notifyQuit: make(chan struct{}),
|
||||
notifyOpen: make(chan string, 1),
|
||||
notifyError: make(chan error, 1),
|
||||
notifyInfo: make(chan string, 1),
|
||||
}
|
||||
err := db.openFile()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, err
|
||||
}
|
||||
err = db.watchFile()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, fmt.Errorf("fsnotify failed for %s: %s", dsn, err)
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// MaxMindUpdateURL generates the URL for MaxMind paid databases.
//
// It performs two HTTP GETs against the given hostname (the
// update_getfilename and update_getipaddr endpoints), hashes each
// response body with MD5 (the second one seeded with the license key),
// and assembles the query string for the update_secure endpoint.
// NOTE(review): this mirrors MaxMind's legacy update protocol —
// confirm against MaxMind's documentation before changing any step.
func MaxMindUpdateURL(hostname, productID, userID, licenseKey string) (string, error) {
	// Cap reads of remote response bodies at 1 GiB to bound memory use.
	limiter := func(r io.Reader) *io.LimitedReader {
		return &io.LimitedReader{R: r, N: 1 << 30}
	}
	baseurl := "https://" + hostname + "/app/"
	// Get the file name for the product ID.
	u := baseurl + "update_getfilename?product_id=" + productID
	resp, err := http.Get(u)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	md5hash := md5.New()
	_, err = io.Copy(md5hash, limiter(resp.Body))
	if err != nil {
		return "", err
	}
	sum := md5hash.Sum(nil)
	hexdigest1 := hex.EncodeToString(sum[:])
	// Get our client IP address.
	resp, err = http.Get(baseurl + "update_getipaddr")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	md5hash = md5.New()
	io.WriteString(md5hash, licenseKey)
	_, err = io.Copy(md5hash, limiter(resp.Body))
	if err != nil {
		return "", err
	}
	sum = md5hash.Sum(nil)
	hexdigest2 := hex.EncodeToString(sum[:])
	// Generate the URL.
	params := url.Values{
		"db_md5":        {hexdigest1},
		"challenge_md5": {hexdigest2},
		"user_id":       {userID},
		"edition_id":    {productID},
	}
	u = baseurl + "update_secure?" + params.Encode()
	return u, nil
}
|
||||
|
||||
// OpenURL creates and initializes a DB from a URL.
|
||||
// It automatically downloads and updates the file in background, and
|
||||
// keeps a local copy on $TMPDIR.
|
||||
func OpenURL(url string, updateInterval, maxRetryInterval time.Duration) (*DB, error) {
|
||||
db := &DB{
|
||||
file: defaultDB,
|
||||
notifyQuit: make(chan struct{}),
|
||||
notifyOpen: make(chan string, 1),
|
||||
notifyError: make(chan error, 1),
|
||||
notifyInfo: make(chan string, 1),
|
||||
updateInterval: updateInterval,
|
||||
maxRetryInterval: maxRetryInterval,
|
||||
}
|
||||
db.openFile() // Optional, might fail.
|
||||
go db.autoUpdate(url)
|
||||
err := db.watchFile()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, fmt.Errorf("fsnotify failed for %s: %s", db.file, err)
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
func (db *DB) watchFile() error {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dbdir, err := db.makeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go db.watchEvents(watcher)
|
||||
return watcher.Watch(dbdir)
|
||||
}
|
||||
|
||||
// watchEvents runs as a goroutine, reopening the database whenever
// its file is created or modified, until notifyQuit is closed.
func (db *DB) watchEvents(watcher *fsnotify.Watcher) {
	for {
		select {
		case ev := <-watcher.Event:
			if ev.Name == db.file && (ev.IsCreate() || ev.IsModify()) {
				db.openFile()
			}
		case <-watcher.Error:
			// Watcher errors are intentionally dropped; the db keeps
			// serving the last successfully loaded file.
		case <-db.notifyQuit:
			watcher.Close()
			return
		}
		time.Sleep(time.Second) // Suppress high-rate events.
	}
}
|
||||
|
||||
func (db *DB) openFile() error {
|
||||
reader, checksum, err := db.newReader(db.file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat, err := os.Stat(db.file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db.setReader(reader, stat.ModTime(), checksum)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) newReader(dbfile string) (*maxminddb.Reader, string, error) {
|
||||
f, err := os.Open(dbfile)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer f.Close()
|
||||
gzf, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer gzf.Close()
|
||||
b, err := ioutil.ReadAll(gzf)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
checksum := fmt.Sprintf("%x", md5.Sum(b))
|
||||
mmdb, err := maxminddb.FromBytes(b)
|
||||
return mmdb, checksum, err
|
||||
}
|
||||
|
||||
// setReader installs reader as the active database under the write
// lock, closing any previously installed reader. The file's
// modification time and checksum are recorded and a non-blocking
// notification is sent on notifyOpen. If the db was closed in the
// meantime the new reader is discarded immediately.
func (db *DB) setReader(reader *maxminddb.Reader, modtime time.Time, checksum string) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if db.closed {
		reader.Close()
		return
	}
	if db.reader != nil {
		db.reader.Close()
	}
	db.reader = reader
	db.lastUpdated = modtime.UTC()
	db.checksum = checksum
	// Non-blocking send: drop the notification if nobody is listening.
	select {
	case db.notifyOpen <- db.file:
	default:
	}
}
|
||||
|
||||
// autoUpdate runs as a goroutine, periodically refreshing the database
// from url until the db is closed. Failed attempts back off
// exponentially (multiplied by e each time), capped at
// maxRetryInterval; successful attempts wait updateInterval.
func (db *DB) autoUpdate(url string) {
	backoff := time.Second
	for {
		db.sendInfo("starting update")
		err := db.runUpdate(url)
		if err != nil {
			bs := backoff.Seconds()
			ms := db.maxRetryInterval.Seconds()
			backoff = time.Duration(math.Min(bs*math.E, ms)) * time.Second
			db.sendError(fmt.Errorf("download failed (will retry in %s): %s", backoff, err))
		} else {
			backoff = db.updateInterval
		}
		db.sendInfo("finished update")
		select {
		case <-db.notifyQuit:
			return
		case <-time.After(backoff):
			// Sleep till time for the next update attempt.
		}
	}
}
|
||||
|
||||
func (db *DB) runUpdate(url string) error {
|
||||
yes, err := db.needUpdate(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !yes {
|
||||
return nil
|
||||
}
|
||||
tmpfile, err := db.download(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = db.renameFile(tmpfile)
|
||||
if err != nil {
|
||||
// Cleanup the tempfile if renaming failed.
|
||||
os.RemoveAll(tmpfile)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// needUpdate reports whether the remote database at url differs from
// the local copy, using a cheap HTTP HEAD request. A missing local
// file always triggers a download; otherwise the server's
// X-Database-MD5 header (if present) is compared against our checksum,
// falling back to a size comparison.
func (db *DB) needUpdate(url string) (bool, error) {
	stat, err := os.Stat(db.file)
	if err != nil {
		return true, nil // Local db is missing, must be downloaded.
	}

	resp, err := http.Head(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	// Check X-Database-MD5 if it exists.
	headerMd5 := resp.Header.Get("X-Database-MD5")
	if len(headerMd5) > 0 && db.checksum != headerMd5 {
		return true, nil
	}

	// NOTE(review): compares the on-disk (gzipped) size to the remote
	// Content-Length — assumes the local file is the same gzip
	// artifact the server advertises.
	if stat.Size() != resp.ContentLength {
		return true, nil
	}
	return false, nil
}
|
||||
|
||||
func (db *DB) download(url string) (tmpfile string, err error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
tmpfile = filepath.Join(os.TempDir(),
|
||||
fmt.Sprintf("_freegeoip.%d.db.gz", time.Now().UnixNano()))
|
||||
f, err := os.Create(tmpfile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(f, resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return tmpfile, nil
|
||||
}
|
||||
|
||||
func (db *DB) makeDir() (dbdir string, err error) {
|
||||
dbdir = filepath.Dir(db.file)
|
||||
_, err = os.Stat(dbdir)
|
||||
if err != nil {
|
||||
err = os.MkdirAll(dbdir, 0755)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return dbdir, nil
|
||||
}
|
||||
|
||||
func (db *DB) renameFile(name string) error {
|
||||
os.Rename(db.file, db.file+".bak") // Optional, might fail.
|
||||
_, err := db.makeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(name, db.file)
|
||||
}
|
||||
|
||||
// Date returns the UTC date the database file was last modified.
|
||||
// If no database file has been opened the behaviour of Date is undefined.
|
||||
func (db *DB) Date() time.Time {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return db.lastUpdated
|
||||
}
|
||||
|
||||
// NotifyClose returns a channel that is closed when the database is closed.
func (db *DB) NotifyClose() <-chan struct{} {
	return db.notifyQuit
}

// NotifyOpen returns a channel that notifies when a new database is
// loaded or reloaded. This can be used to monitor background updates
// when the DB points to a URL. The channel is closed by Close.
func (db *DB) NotifyOpen() (filename <-chan string) {
	return db.notifyOpen
}

// NotifyError returns a channel that notifies when an error occurs
// while downloading or reloading a DB that points to a URL.
// The channel is closed by Close.
func (db *DB) NotifyError() (errChan <-chan error) {
	return db.notifyError
}

// NotifyInfo returns a channel that notifies informational messages
// while downloading or reloading. The channel is closed by Close.
func (db *DB) NotifyInfo() <-chan string {
	return db.notifyInfo
}
|
||||
|
||||
// sendError delivers err on the notifyError channel without blocking;
// the message is dropped if no receiver is ready or the db is closed.
func (db *DB) sendError(err error) {
	db.mu.RLock()
	defer db.mu.RUnlock()
	if db.closed {
		return
	}
	select {
	case db.notifyError <- err:
	default:
	}
}
|
||||
|
||||
// sendInfo delivers message on the notifyInfo channel without
// blocking; the message is dropped if no receiver is ready or the db
// is closed.
func (db *DB) sendInfo(message string) {
	db.mu.RLock()
	defer db.mu.RUnlock()
	if db.closed {
		return
	}
	select {
	case db.notifyInfo <- message:
	default:
	}
}
|
||||
|
||||
// Lookup performs a database lookup of the given IP address, and stores
|
||||
// the response into the result value. The result value must be a struct
|
||||
// with specific fields and tags as described here:
|
||||
// https://godoc.org/github.com/oschwald/maxminddb-golang#Reader.Lookup
|
||||
//
|
||||
// See the DefaultQuery for an example of the result struct.
|
||||
func (db *DB) Lookup(addr net.IP, result interface{}) error {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
if db.reader != nil {
|
||||
return db.reader.Lookup(addr, result)
|
||||
}
|
||||
return ErrUnavailable
|
||||
}
|
||||
|
||||
// DefaultQuery is the default query used for database lookups.
// The maxminddb struct tags map fields of the City database schema
// onto this struct; the Names maps are keyed by language code.
type DefaultQuery struct {
	Continent struct {
		Names map[string]string `maxminddb:"names"`
	} `maxminddb:"continent"`
	Country struct {
		ISOCode string            `maxminddb:"iso_code"`
		Names   map[string]string `maxminddb:"names"`
	} `maxminddb:"country"`
	Region []struct {
		ISOCode string            `maxminddb:"iso_code"`
		Names   map[string]string `maxminddb:"names"`
	} `maxminddb:"subdivisions"`
	City struct {
		Names map[string]string `maxminddb:"names"`
	} `maxminddb:"city"`
	Location struct {
		Latitude  float64 `maxminddb:"latitude"`
		Longitude float64 `maxminddb:"longitude"`
		MetroCode uint    `maxminddb:"metro_code"`
		TimeZone  string  `maxminddb:"time_zone"`
	} `maxminddb:"location"`
	Postal struct {
		Code string `maxminddb:"code"`
	} `maxminddb:"postal"`
}
|
||||
|
||||
// Close closes the database and releases the underlying reader.
// The notification channels are closed on the first call only; the
// closed flag makes subsequent calls skip the double-close.
func (db *DB) Close() {
	db.mu.Lock()
	defer db.mu.Unlock()
	if !db.closed {
		db.closed = true
		close(db.notifyQuit)
		close(db.notifyOpen)
		close(db.notifyError)
		close(db.notifyInfo)
	}
	if db.reader != nil {
		db.reader.Close()
		db.reader = nil
	}
}
|
|
@ -0,0 +1,14 @@
|
|||
// Copyright 2009 The freegeoip authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package freegeoip provides an API for searching the geolocation of IP
|
||||
// addresses. It uses a database that can be either a local file or a
|
||||
// remote resource from a URL.
|
||||
//
|
||||
// Local databases are monitored by fsnotify and reloaded when the file is
|
||||
// either updated or overwritten.
|
||||
//
|
||||
// Remote databases are automatically downloaded and updated in background
|
||||
// so you can focus on using the API and not managing the database.
|
||||
package freegeoip
|
Binary file not shown.
After Width: | Height: | Size: 14 KiB |
|
@ -0,0 +1,28 @@
|
|||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
|
@ -0,0 +1,160 @@
|
|||
# Changelog
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
[#1]: https://github.com/howeyc/fsnotify/issues/1
|
|
@ -0,0 +1,7 @@
|
|||
# Contributing
|
||||
|
||||
## Moving Notice
|
||||
|
||||
There is a fork being actively developed with a new API in preparation for the Go Standard Library:
|
||||
[github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify)
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,93 @@
|
|||
# File system notifications for Go
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/howeyc/fsnotify?status.png)](http://godoc.org/github.com/howeyc/fsnotify)
|
||||
|
||||
Cross platform: Windows, Linux, BSD and OS X.
|
||||
|
||||
## Moving Notice
|
||||
|
||||
There is a fork being actively developed with a new API in preparation for the Go Standard Library:
|
||||
[github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify)
|
||||
|
||||
## Example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/howeyc/fsnotify"
|
||||
)
|
||||
|
||||
func main() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
done := make(chan bool)
|
||||
|
||||
// Process events
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case ev := <-watcher.Event:
|
||||
log.Println("event:", ev)
|
||||
case err := <-watcher.Error:
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = watcher.Watch("testDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Hang so program doesn't exit
|
||||
<-done
|
||||
|
||||
/* ... do stuff ... */
|
||||
watcher.Close()
|
||||
}
|
||||
```
|
||||
|
||||
For each event:
|
||||
* Name
|
||||
* IsCreate()
|
||||
* IsDelete()
|
||||
* IsModify()
|
||||
* IsRename()
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is in the works [#56][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [#7][])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [#62][]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#54][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit,
|
||||
reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#56]: https://github.com/howeyc/fsnotify/issues/56
|
||||
[#54]: https://github.com/howeyc/fsnotify/issues/54
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fsnotify implements file system notification.
|
||||
package fsnotify
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
FSN_CREATE = 1
|
||||
FSN_MODIFY = 2
|
||||
FSN_DELETE = 4
|
||||
FSN_RENAME = 8
|
||||
|
||||
FSN_ALL = FSN_MODIFY | FSN_DELETE | FSN_RENAME | FSN_CREATE
|
||||
)
|
||||
|
||||
// Purge events from interal chan to external chan if passes filter
|
||||
func (w *Watcher) purgeEvents() {
|
||||
for ev := range w.internalEvent {
|
||||
sendEvent := false
|
||||
w.fsnmut.Lock()
|
||||
fsnFlags := w.fsnFlags[ev.Name]
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
if (fsnFlags&FSN_CREATE == FSN_CREATE) && ev.IsCreate() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if (fsnFlags&FSN_MODIFY == FSN_MODIFY) && ev.IsModify() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if (fsnFlags&FSN_DELETE == FSN_DELETE) && ev.IsDelete() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if (fsnFlags&FSN_RENAME == FSN_RENAME) && ev.IsRename() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if sendEvent {
|
||||
w.Event <- ev
|
||||
}
|
||||
|
||||
// If there's no file, then no more events for user
|
||||
// BSD must keep watch for internal use (watches DELETEs to keep track
|
||||
// what files exist for create events)
|
||||
if ev.IsDelete() {
|
||||
w.fsnmut.Lock()
|
||||
delete(w.fsnFlags, ev.Name)
|
||||
w.fsnmut.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
close(w.Event)
|
||||
}
|
||||
|
||||
// Watch a given file path
|
||||
func (w *Watcher) Watch(path string) error {
|
||||
return w.WatchFlags(path, FSN_ALL)
|
||||
}
|
||||
|
||||
// Watch a given file path for a particular set of notifications (FSN_MODIFY etc.)
|
||||
func (w *Watcher) WatchFlags(path string, flags uint32) error {
|
||||
w.fsnmut.Lock()
|
||||
w.fsnFlags[path] = flags
|
||||
w.fsnmut.Unlock()
|
||||
return w.watch(path)
|
||||
}
|
||||
|
||||
// Remove a watch on a file
|
||||
func (w *Watcher) RemoveWatch(path string) error {
|
||||
w.fsnmut.Lock()
|
||||
delete(w.fsnFlags, path)
|
||||
w.fsnmut.Unlock()
|
||||
return w.removeWatch(path)
|
||||
}
|
||||
|
||||
// String formats the event e in the form
|
||||
// "filename: DELETE|MODIFY|..."
|
||||
func (e *FileEvent) String() string {
|
||||
var events string = ""
|
||||
|
||||
if e.IsCreate() {
|
||||
events += "|" + "CREATE"
|
||||
}
|
||||
|
||||
if e.IsDelete() {
|
||||
events += "|" + "DELETE"
|
||||
}
|
||||
|
||||
if e.IsModify() {
|
||||
events += "|" + "MODIFY"
|
||||
}
|
||||
|
||||
if e.IsRename() {
|
||||
events += "|" + "RENAME"
|
||||
}
|
||||
|
||||
if e.IsAttrib() {
|
||||
events += "|" + "ATTRIB"
|
||||
}
|
||||
|
||||
if len(events) > 0 {
|
||||
events = events[1:]
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%q: %s", e.Name, events)
|
||||
}
|
|
@ -0,0 +1,496 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// Flags (from <sys/event.h>)
|
||||
sys_NOTE_DELETE = 0x0001 /* vnode was removed */
|
||||
sys_NOTE_WRITE = 0x0002 /* data contents changed */
|
||||
sys_NOTE_EXTEND = 0x0004 /* size increased */
|
||||
sys_NOTE_ATTRIB = 0x0008 /* attributes changed */
|
||||
sys_NOTE_LINK = 0x0010 /* link count changed */
|
||||
sys_NOTE_RENAME = 0x0020 /* vnode was renamed */
|
||||
sys_NOTE_REVOKE = 0x0040 /* vnode access was revoked */
|
||||
|
||||
// Watch all events
|
||||
sys_NOTE_ALLEVENTS = sys_NOTE_DELETE | sys_NOTE_WRITE | sys_NOTE_ATTRIB | sys_NOTE_RENAME
|
||||
|
||||
// Block for 100 ms on each call to kevent
|
||||
keventWaitTime = 100e6
|
||||
)
|
||||
|
||||
type FileEvent struct {
|
||||
mask uint32 // Mask of events
|
||||
Name string // File name (optional)
|
||||
create bool // set by fsnotify package if found new file
|
||||
}
|
||||
|
||||
// IsCreate reports whether the FileEvent was triggered by a creation
|
||||
func (e *FileEvent) IsCreate() bool { return e.create }
|
||||
|
||||
// IsDelete reports whether the FileEvent was triggered by a delete
|
||||
func (e *FileEvent) IsDelete() bool { return (e.mask & sys_NOTE_DELETE) == sys_NOTE_DELETE }
|
||||
|
||||
// IsModify reports whether the FileEvent was triggered by a file modification
|
||||
func (e *FileEvent) IsModify() bool {
|
||||
return ((e.mask&sys_NOTE_WRITE) == sys_NOTE_WRITE || (e.mask&sys_NOTE_ATTRIB) == sys_NOTE_ATTRIB)
|
||||
}
|
||||
|
||||
// IsRename reports whether the FileEvent was triggered by a change name
|
||||
func (e *FileEvent) IsRename() bool { return (e.mask & sys_NOTE_RENAME) == sys_NOTE_RENAME }
|
||||
|
||||
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
|
||||
func (e *FileEvent) IsAttrib() bool {
|
||||
return (e.mask & sys_NOTE_ATTRIB) == sys_NOTE_ATTRIB
|
||||
}
|
||||
|
||||
type Watcher struct {
|
||||
mu sync.Mutex // Mutex for the Watcher itself.
|
||||
kq int // File descriptor (as returned by the kqueue() syscall)
|
||||
watches map[string]int // Map of watched file descriptors (key: path)
|
||||
wmut sync.Mutex // Protects access to watches.
|
||||
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
|
||||
fsnmut sync.Mutex // Protects access to fsnFlags.
|
||||
enFlags map[string]uint32 // Map of watched files to evfilt note flags used in kqueue
|
||||
enmut sync.Mutex // Protects access to enFlags.
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
finfo map[int]os.FileInfo // Map of file information (isDir, isReg; key: watch descriptor)
|
||||
pmut sync.Mutex // Protects access to paths and finfo.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events)
|
||||
femut sync.Mutex // Protects access to fileExists.
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
ewmut sync.Mutex // Protects access to externalWatches.
|
||||
Error chan error // Errors are sent on this channel
|
||||
internalEvent chan *FileEvent // Events are queued on this channel
|
||||
Event chan *FileEvent // Events are returned on this channel
|
||||
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates and returns a new kevent instance using kqueue(2)
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
fd, errno := syscall.Kqueue()
|
||||
if fd == -1 {
|
||||
return nil, os.NewSyscallError("kqueue", errno)
|
||||
}
|
||||
w := &Watcher{
|
||||
kq: fd,
|
||||
watches: make(map[string]int),
|
||||
fsnFlags: make(map[string]uint32),
|
||||
enFlags: make(map[string]uint32),
|
||||
paths: make(map[int]string),
|
||||
finfo: make(map[int]os.FileInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
internalEvent: make(chan *FileEvent),
|
||||
Event: make(chan *FileEvent),
|
||||
Error: make(chan error),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
go w.purgeEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close closes a kevent watcher instance
|
||||
// It sends a message to the reader goroutine to quit and removes all watches
|
||||
// associated with the kevent instance
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
w.done <- true
|
||||
w.wmut.Lock()
|
||||
ws := w.watches
|
||||
w.wmut.Unlock()
|
||||
for path := range ws {
|
||||
w.removeWatch(path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddWatch adds path to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
func (w *Watcher) addWatch(path string, flags uint32) error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return errors.New("kevent instance already closed")
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
watchDir := false
|
||||
|
||||
w.wmut.Lock()
|
||||
watchfd, found := w.watches[path]
|
||||
w.wmut.Unlock()
|
||||
if !found {
|
||||
fi, errstat := os.Lstat(path)
|
||||
if errstat != nil {
|
||||
return errstat
|
||||
}
|
||||
|
||||
// don't watch socket
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
path, err := filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
fi, errstat = os.Lstat(path)
|
||||
if errstat != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fd, errno := syscall.Open(path, open_FLAGS, 0700)
|
||||
if fd == -1 {
|
||||
return errno
|
||||
}
|
||||
watchfd = fd
|
||||
|
||||
w.wmut.Lock()
|
||||
w.watches[path] = watchfd
|
||||
w.wmut.Unlock()
|
||||
|
||||
w.pmut.Lock()
|
||||
w.paths[watchfd] = path
|
||||
w.finfo[watchfd] = fi
|
||||
w.pmut.Unlock()
|
||||
}
|
||||
// Watch the directory if it has not been watched before.
|
||||
w.pmut.Lock()
|
||||
w.enmut.Lock()
|
||||
if w.finfo[watchfd].IsDir() &&
|
||||
(flags&sys_NOTE_WRITE) == sys_NOTE_WRITE &&
|
||||
(!found || (w.enFlags[path]&sys_NOTE_WRITE) != sys_NOTE_WRITE) {
|
||||
watchDir = true
|
||||
}
|
||||
w.enmut.Unlock()
|
||||
w.pmut.Unlock()
|
||||
|
||||
w.enmut.Lock()
|
||||
w.enFlags[path] = flags
|
||||
w.enmut.Unlock()
|
||||
|
||||
var kbuf [1]syscall.Kevent_t
|
||||
watchEntry := &kbuf[0]
|
||||
watchEntry.Fflags = flags
|
||||
syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_ADD|syscall.EV_CLEAR)
|
||||
entryFlags := watchEntry.Flags
|
||||
success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
|
||||
if success == -1 {
|
||||
return errno
|
||||
} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
|
||||
return errors.New("kevent add error")
|
||||
}
|
||||
|
||||
if watchDir {
|
||||
errdir := w.watchDirectoryFiles(path)
|
||||
if errdir != nil {
|
||||
return errdir
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch adds path to the watched file set, watching all events.
|
||||
func (w *Watcher) watch(path string) error {
|
||||
w.ewmut.Lock()
|
||||
w.externalWatches[path] = true
|
||||
w.ewmut.Unlock()
|
||||
return w.addWatch(path, sys_NOTE_ALLEVENTS)
|
||||
}
|
||||
|
||||
// RemoveWatch removes path from the watched file set.
|
||||
func (w *Watcher) removeWatch(path string) error {
|
||||
w.wmut.Lock()
|
||||
watchfd, ok := w.watches[path]
|
||||
w.wmut.Unlock()
|
||||
if !ok {
|
||||
return errors.New(fmt.Sprintf("can't remove non-existent kevent watch for: %s", path))
|
||||
}
|
||||
var kbuf [1]syscall.Kevent_t
|
||||
watchEntry := &kbuf[0]
|
||||
syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
|
||||
entryFlags := watchEntry.Flags
|
||||
success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
|
||||
if success == -1 {
|
||||
return os.NewSyscallError("kevent_rm_watch", errno)
|
||||
} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
|
||||
return errors.New("kevent rm error")
|
||||
}
|
||||
syscall.Close(watchfd)
|
||||
w.wmut.Lock()
|
||||
delete(w.watches, path)
|
||||
w.wmut.Unlock()
|
||||
w.enmut.Lock()
|
||||
delete(w.enFlags, path)
|
||||
w.enmut.Unlock()
|
||||
w.pmut.Lock()
|
||||
delete(w.paths, watchfd)
|
||||
fInfo := w.finfo[watchfd]
|
||||
delete(w.finfo, watchfd)
|
||||
w.pmut.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if fInfo.IsDir() {
|
||||
var pathsToRemove []string
|
||||
w.pmut.Lock()
|
||||
for _, wpath := range w.paths {
|
||||
wdir, _ := filepath.Split(wpath)
|
||||
if filepath.Clean(wdir) == filepath.Clean(path) {
|
||||
w.ewmut.Lock()
|
||||
if !w.externalWatches[wpath] {
|
||||
pathsToRemove = append(pathsToRemove, wpath)
|
||||
}
|
||||
w.ewmut.Unlock()
|
||||
}
|
||||
}
|
||||
w.pmut.Unlock()
|
||||
for _, p := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.removeWatch(p)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the kqueue file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Event channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
eventbuf [10]syscall.Kevent_t // Event buffer
|
||||
events []syscall.Kevent_t // Received events
|
||||
twait *syscall.Timespec // Time to block waiting for events
|
||||
n int // Number of events returned from kevent
|
||||
errno error // Syscall errno
|
||||
)
|
||||
events = eventbuf[0:0]
|
||||
twait = new(syscall.Timespec)
|
||||
*twait = syscall.NsecToTimespec(keventWaitTime)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
var done bool
|
||||
select {
|
||||
case done = <-w.done:
|
||||
default:
|
||||
}
|
||||
|
||||
// If "done" message is received
|
||||
if done {
|
||||
errno := syscall.Close(w.kq)
|
||||
if errno != nil {
|
||||
w.Error <- os.NewSyscallError("close", errno)
|
||||
}
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
return
|
||||
}
|
||||
|
||||
// Get new events
|
||||
if len(events) == 0 {
|
||||
n, errno = syscall.Kevent(w.kq, nil, eventbuf[:], twait)
|
||||
|
||||
// EINTR is okay, basically the syscall was interrupted before
|
||||
// timeout expired.
|
||||
if errno != nil && errno != syscall.EINTR {
|
||||
w.Error <- os.NewSyscallError("kevent", errno)
|
||||
continue
|
||||
}
|
||||
|
||||
// Received some events
|
||||
if n > 0 {
|
||||
events = eventbuf[0:n]
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the events we received to the events channel
|
||||
for len(events) > 0 {
|
||||
fileEvent := new(FileEvent)
|
||||
watchEvent := &events[0]
|
||||
fileEvent.mask = uint32(watchEvent.Fflags)
|
||||
w.pmut.Lock()
|
||||
fileEvent.Name = w.paths[int(watchEvent.Ident)]
|
||||
fileInfo := w.finfo[int(watchEvent.Ident)]
|
||||
w.pmut.Unlock()
|
||||
if fileInfo != nil && fileInfo.IsDir() && !fileEvent.IsDelete() {
|
||||
// Double check to make sure the directory exist. This can happen when
|
||||
// we do a rm -fr on a recursively watched folders and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(fileEvent.Name); os.IsNotExist(err) {
|
||||
// mark is as delete event
|
||||
fileEvent.mask |= sys_NOTE_DELETE
|
||||
}
|
||||
}
|
||||
|
||||
if fileInfo != nil && fileInfo.IsDir() && fileEvent.IsModify() && !fileEvent.IsDelete() {
|
||||
w.sendDirectoryChangeEvents(fileEvent.Name)
|
||||
} else {
|
||||
// Send the event on the events channel
|
||||
w.internalEvent <- fileEvent
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
events = events[1:]
|
||||
|
||||
if fileEvent.IsRename() {
|
||||
w.removeWatch(fileEvent.Name)
|
||||
w.femut.Lock()
|
||||
delete(w.fileExists, fileEvent.Name)
|
||||
w.femut.Unlock()
|
||||
}
|
||||
if fileEvent.IsDelete() {
|
||||
w.removeWatch(fileEvent.Name)
|
||||
w.femut.Lock()
|
||||
delete(w.fileExists, fileEvent.Name)
|
||||
w.femut.Unlock()
|
||||
|
||||
// Look for a file that may have overwritten this
|
||||
// (ie mv f1 f2 will delete f2 then create f2)
|
||||
fileDir, _ := filepath.Split(fileEvent.Name)
|
||||
fileDir = filepath.Clean(fileDir)
|
||||
w.wmut.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.wmut.Unlock()
|
||||
if found {
|
||||
// make sure the directory exist before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch form the parent folder
|
||||
if _, err := os.Lstat(fileDir); !os.IsNotExist(err) {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
|
||||
// Inherit fsnFlags from parent directory
|
||||
w.fsnmut.Lock()
|
||||
if flags, found := w.fsnFlags[dirPath]; found {
|
||||
w.fsnFlags[filePath] = flags
|
||||
} else {
|
||||
w.fsnFlags[filePath] = FSN_ALL
|
||||
}
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
if fileInfo.IsDir() == false {
|
||||
// Watch file to mimic linux fsnotify
|
||||
e := w.addWatch(filePath, sys_NOTE_ALLEVENTS)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
} else {
|
||||
// If the user is currently watching directory
|
||||
// we want to preserve the flags used
|
||||
w.enmut.Lock()
|
||||
currFlags, found := w.enFlags[filePath]
|
||||
w.enmut.Unlock()
|
||||
var newFlags uint32 = sys_NOTE_DELETE
|
||||
if found {
|
||||
newFlags |= currFlags
|
||||
}
|
||||
|
||||
// Linux gives deletes if not explicitly watching
|
||||
e := w.addWatch(filePath, newFlags)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
w.femut.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.femut.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match linux fsnotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
w.Error <- err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
w.femut.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.femut.Unlock()
|
||||
if !doesExist {
|
||||
// Inherit fsnFlags from parent directory
|
||||
w.fsnmut.Lock()
|
||||
if flags, found := w.fsnFlags[dirPath]; found {
|
||||
w.fsnFlags[filePath] = flags
|
||||
} else {
|
||||
w.fsnFlags[filePath] = FSN_ALL
|
||||
}
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
// Send create event
|
||||
fileEvent := new(FileEvent)
|
||||
fileEvent.Name = filePath
|
||||
fileEvent.create = true
|
||||
w.internalEvent <- fileEvent
|
||||
}
|
||||
w.femut.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.femut.Unlock()
|
||||
}
|
||||
w.watchDirectoryFiles(dirPath)
|
||||
}
|
|
@ -0,0 +1,304 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// Options for inotify_init() are not exported
|
||||
// sys_IN_CLOEXEC uint32 = syscall.IN_CLOEXEC
|
||||
// sys_IN_NONBLOCK uint32 = syscall.IN_NONBLOCK
|
||||
|
||||
// Options for AddWatch
|
||||
sys_IN_DONT_FOLLOW uint32 = syscall.IN_DONT_FOLLOW
|
||||
sys_IN_ONESHOT uint32 = syscall.IN_ONESHOT
|
||||
sys_IN_ONLYDIR uint32 = syscall.IN_ONLYDIR
|
||||
|
||||
// The "sys_IN_MASK_ADD" option is not exported, as AddWatch
|
||||
// adds it automatically, if there is already a watch for the given path
|
||||
// sys_IN_MASK_ADD uint32 = syscall.IN_MASK_ADD
|
||||
|
||||
// Events
|
||||
sys_IN_ACCESS uint32 = syscall.IN_ACCESS
|
||||
sys_IN_ALL_EVENTS uint32 = syscall.IN_ALL_EVENTS
|
||||
sys_IN_ATTRIB uint32 = syscall.IN_ATTRIB
|
||||
sys_IN_CLOSE uint32 = syscall.IN_CLOSE
|
||||
sys_IN_CLOSE_NOWRITE uint32 = syscall.IN_CLOSE_NOWRITE
|
||||
sys_IN_CLOSE_WRITE uint32 = syscall.IN_CLOSE_WRITE
|
||||
sys_IN_CREATE uint32 = syscall.IN_CREATE
|
||||
sys_IN_DELETE uint32 = syscall.IN_DELETE
|
||||
sys_IN_DELETE_SELF uint32 = syscall.IN_DELETE_SELF
|
||||
sys_IN_MODIFY uint32 = syscall.IN_MODIFY
|
||||
sys_IN_MOVE uint32 = syscall.IN_MOVE
|
||||
sys_IN_MOVED_FROM uint32 = syscall.IN_MOVED_FROM
|
||||
sys_IN_MOVED_TO uint32 = syscall.IN_MOVED_TO
|
||||
sys_IN_MOVE_SELF uint32 = syscall.IN_MOVE_SELF
|
||||
sys_IN_OPEN uint32 = syscall.IN_OPEN
|
||||
|
||||
sys_AGNOSTIC_EVENTS = sys_IN_MOVED_TO | sys_IN_MOVED_FROM | sys_IN_CREATE | sys_IN_ATTRIB | sys_IN_MODIFY | sys_IN_MOVE_SELF | sys_IN_DELETE | sys_IN_DELETE_SELF
|
||||
|
||||
// Special events
|
||||
sys_IN_ISDIR uint32 = syscall.IN_ISDIR
|
||||
sys_IN_IGNORED uint32 = syscall.IN_IGNORED
|
||||
sys_IN_Q_OVERFLOW uint32 = syscall.IN_Q_OVERFLOW
|
||||
sys_IN_UNMOUNT uint32 = syscall.IN_UNMOUNT
|
||||
)
|
||||
|
||||
type FileEvent struct {
|
||||
mask uint32 // Mask of events
|
||||
cookie uint32 // Unique cookie associating related events (for rename(2))
|
||||
Name string // File name (optional)
|
||||
}
|
||||
|
||||
// IsCreate reports whether the FileEvent was triggered by a creation
|
||||
func (e *FileEvent) IsCreate() bool {
|
||||
return (e.mask&sys_IN_CREATE) == sys_IN_CREATE || (e.mask&sys_IN_MOVED_TO) == sys_IN_MOVED_TO
|
||||
}
|
||||
|
||||
// IsDelete reports whether the FileEvent was triggered by a delete
|
||||
func (e *FileEvent) IsDelete() bool {
|
||||
return (e.mask&sys_IN_DELETE_SELF) == sys_IN_DELETE_SELF || (e.mask&sys_IN_DELETE) == sys_IN_DELETE
|
||||
}
|
||||
|
||||
// IsModify reports whether the FileEvent was triggered by a file modification or attribute change
|
||||
func (e *FileEvent) IsModify() bool {
|
||||
return ((e.mask&sys_IN_MODIFY) == sys_IN_MODIFY || (e.mask&sys_IN_ATTRIB) == sys_IN_ATTRIB)
|
||||
}
|
||||
|
||||
// IsRename reports whether the FileEvent was triggered by a change name
|
||||
func (e *FileEvent) IsRename() bool {
|
||||
return ((e.mask&sys_IN_MOVE_SELF) == sys_IN_MOVE_SELF || (e.mask&sys_IN_MOVED_FROM) == sys_IN_MOVED_FROM)
|
||||
}
|
||||
|
||||
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
|
||||
func (e *FileEvent) IsAttrib() bool {
|
||||
return (e.mask & sys_IN_ATTRIB) == sys_IN_ATTRIB
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
type Watcher struct {
|
||||
mu sync.Mutex // Map access
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
|
||||
fsnmut sync.Mutex // Protects access to fsnFlags.
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
Error chan error // Errors are sent on this channel
|
||||
internalEvent chan *FileEvent // Events are queued on this channel
|
||||
Event chan *FileEvent // Events are returned on this channel
|
||||
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates and returns a new inotify instance using inotify_init(2)
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
fd, errno := syscall.InotifyInit()
|
||||
if fd == -1 {
|
||||
return nil, os.NewSyscallError("inotify_init", errno)
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
watches: make(map[string]*watch),
|
||||
fsnFlags: make(map[string]uint32),
|
||||
paths: make(map[int]string),
|
||||
internalEvent: make(chan *FileEvent),
|
||||
Event: make(chan *FileEvent),
|
||||
Error: make(chan error),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
go w.purgeEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close closes an inotify watcher instance
|
||||
// It sends a message to the reader goroutine to quit and removes all watches
|
||||
// associated with the inotify instance
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Remove all watches
|
||||
for path := range w.watches {
|
||||
w.RemoveWatch(path)
|
||||
}
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
w.done <- true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddWatch adds path to the watched file set.
|
||||
// The flags are interpreted as described in inotify_add_watch(2).
|
||||
func (w *Watcher) addWatch(path string, flags uint32) error {
|
||||
if w.isClosed {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
watchEntry, found := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
watchEntry.flags |= flags
|
||||
flags |= syscall.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := syscall.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[path] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = path
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch adds path to the watched file set, watching all events.
|
||||
func (w *Watcher) watch(path string) error {
|
||||
return w.addWatch(path, sys_AGNOSTIC_EVENTS)
|
||||
}
|
||||
|
||||
// RemoveWatch removes path from the watched file set.
|
||||
func (w *Watcher) removeWatch(path string) error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[path]
|
||||
if !ok {
|
||||
return errors.New(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path))
|
||||
}
|
||||
success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
return os.NewSyscallError("inotify_rm_watch", errno)
|
||||
}
|
||||
delete(w.watches, path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Event channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
syscall.Close(w.fd)
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
n, errno = syscall.Read(w.fd, buf[:])
|
||||
|
||||
// If EOF is received
|
||||
if n == 0 {
|
||||
syscall.Close(w.fd)
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
return
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
w.Error <- os.NewSyscallError("read", errno)
|
||||
continue
|
||||
}
|
||||
if n < syscall.SizeofInotifyEvent {
|
||||
w.Error <- errors.New("inotify: short read in readEvents()")
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32 = 0
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
event := new(FileEvent)
|
||||
event.mask = uint32(raw.Mask)
|
||||
event.cookie = uint32(raw.Cookie)
|
||||
nameLen := uint32(raw.Len)
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
event.Name = w.paths[int(raw.Wd)]
|
||||
w.mu.Unlock()
|
||||
watchedName := event.Name
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
|
||||
// The filename is padded with NUL bytes. TrimRight() gets rid of those.
|
||||
event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux() {
|
||||
// Setup FSNotify flags (inherit from directory watch)
|
||||
w.fsnmut.Lock()
|
||||
if _, fsnFound := w.fsnFlags[event.Name]; !fsnFound {
|
||||
if fsnFlags, watchFound := w.fsnFlags[watchedName]; watchFound {
|
||||
w.fsnFlags[event.Name] = fsnFlags
|
||||
} else {
|
||||
w.fsnFlags[event.Name] = FSN_ALL
|
||||
}
|
||||
}
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
w.internalEvent <- event
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += syscall.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Event
|
||||
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *FileEvent) ignoreLinux() bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if e.mask&sys_IN_IGNORED == sys_IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.IsDelete() || e.IsRename()) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "syscall"
|
||||
|
||||
const open_FLAGS = syscall.O_NONBLOCK | syscall.O_RDONLY
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "syscall"
|
||||
|
||||
const open_FLAGS = syscall.O_EVTONLY
|
|
@ -0,0 +1,598 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sys_FS_ONESHOT = 0x80000000
|
||||
sys_FS_ONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sys_FS_ACCESS = 0x1
|
||||
sys_FS_ALL_EVENTS = 0xfff
|
||||
sys_FS_ATTRIB = 0x4
|
||||
sys_FS_CLOSE = 0x18
|
||||
sys_FS_CREATE = 0x100
|
||||
sys_FS_DELETE = 0x200
|
||||
sys_FS_DELETE_SELF = 0x400
|
||||
sys_FS_MODIFY = 0x2
|
||||
sys_FS_MOVE = 0xc0
|
||||
sys_FS_MOVED_FROM = 0x40
|
||||
sys_FS_MOVED_TO = 0x80
|
||||
sys_FS_MOVE_SELF = 0x800
|
||||
|
||||
// Special events
|
||||
sys_FS_IGNORED = 0x8000
|
||||
sys_FS_Q_OVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(nj): Use syscall.ERROR_MORE_DATA from ztypes_windows in Go 1.3+
|
||||
sys_ERROR_MORE_DATA syscall.Errno = 234
|
||||
)
|
||||
|
||||
// Event is the type of the notification messages
|
||||
// received on the watcher's Event channel.
|
||||
type FileEvent struct {
|
||||
mask uint32 // Mask of events
|
||||
cookie uint32 // Unique cookie associating related events (for rename)
|
||||
Name string // File name (optional)
|
||||
}
|
||||
|
||||
// IsCreate reports whether the FileEvent was triggered by a creation
|
||||
func (e *FileEvent) IsCreate() bool { return (e.mask & sys_FS_CREATE) == sys_FS_CREATE }
|
||||
|
||||
// IsDelete reports whether the FileEvent was triggered by a delete
|
||||
func (e *FileEvent) IsDelete() bool {
|
||||
return ((e.mask&sys_FS_DELETE) == sys_FS_DELETE || (e.mask&sys_FS_DELETE_SELF) == sys_FS_DELETE_SELF)
|
||||
}
|
||||
|
||||
// IsModify reports whether the FileEvent was triggered by a file modification or attribute change
|
||||
func (e *FileEvent) IsModify() bool {
|
||||
return ((e.mask&sys_FS_MODIFY) == sys_FS_MODIFY || (e.mask&sys_FS_ATTRIB) == sys_FS_ATTRIB)
|
||||
}
|
||||
|
||||
// IsRename reports whether the FileEvent was triggered by a change name
|
||||
func (e *FileEvent) IsRename() bool {
|
||||
return ((e.mask&sys_FS_MOVE) == sys_FS_MOVE || (e.mask&sys_FS_MOVE_SELF) == sys_FS_MOVE_SELF || (e.mask&sys_FS_MOVED_FROM) == sys_FS_MOVED_FROM || (e.mask&sys_FS_MOVED_TO) == sys_FS_MOVED_TO)
|
||||
}
|
||||
|
||||
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
|
||||
func (e *FileEvent) IsAttrib() bool {
|
||||
return (e.mask & sys_FS_ATTRIB) == sys_FS_ATTRIB
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
// A Watcher waits for and receives event notifications
|
||||
// for a specific set of files and directories.
|
||||
type Watcher struct {
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
|
||||
fsnmut sync.Mutex // Protects access to fsnFlags.
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
internalEvent chan *FileEvent // Events are queued on this channel
|
||||
Event chan *FileEvent // Events are returned on this channel
|
||||
Error chan error // Errors are sent on this channel
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
quit chan chan<- error
|
||||
cookie uint32
|
||||
}
|
||||
|
||||
// NewWatcher creates and returns a Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
fsnFlags: make(map[string]uint32),
|
||||
input: make(chan *input, 1),
|
||||
Event: make(chan *FileEvent, 50),
|
||||
internalEvent: make(chan *FileEvent),
|
||||
Error: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
go w.purgeEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close closes a Watcher.
|
||||
// It sends a message to the reader goroutine to quit and removes all watches
|
||||
// associated with the watcher.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// AddWatch adds path to the watched file set.
|
||||
func (w *Watcher) AddWatch(path string, flags uint32) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(path),
|
||||
flags: flags,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Watch adds path to the watched file set, watching all events.
|
||||
func (w *Watcher) watch(path string) error {
|
||||
return w.AddWatch(path, sys_FS_ALL_EVENTS)
|
||||
}
|
||||
|
||||
// RemoveWatch removes path from the watched file set.
|
||||
func (w *Watcher) removeWatch(path string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(path),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Error <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Error <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
|
||||
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Event channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case sys_ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Error <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Error <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.internalEvent <- &FileEvent{mask: sys_FS_Q_OVERFLOW}
|
||||
w.Error <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := watch.path + "\\" + name
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sys_FS_DELETE_SELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sys_FS_MODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sys_FS_MOVE_SELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sys_FS_ONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = watch.path + "\\" + watch.rename
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Error <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Error <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := &FileEvent{mask: uint32(mask), Name: name}
|
||||
if mask&sys_FS_MOVE != 0 {
|
||||
if mask&sys_FS_MOVED_FROM != 0 {
|
||||
w.cookie++
|
||||
}
|
||||
event.cookie = w.cookie
|
||||
}
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Event <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sys_FS_ACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sys_FS_MODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sys_FS_ATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sys_FS_CREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sys_FS_DELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sys_FS_MODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sys_FS_MOVED_FROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sys_FS_MOVED_TO
|
||||
}
|
||||
return 0
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
ISC License
|
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
|
@ -0,0 +1,38 @@
|
|||
# MaxMind DB Reader for Go #
|
||||
|
||||
[![Build Status](https://travis-ci.org/oschwald/maxminddb-golang.png?branch=master)](https://travis-ci.org/oschwald/maxminddb-golang)
|
||||
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/4j2f9oep8nnfrmov/branch/master?svg=true)](https://ci.appveyor.com/project/oschwald/maxminddb-golang/branch/master)
|
||||
[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.png)](https://godoc.org/github.com/oschwald/maxminddb-golang)
|
||||
|
||||
This is a Go reader for the MaxMind DB format. Although this can be used to
|
||||
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
|
||||
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
|
||||
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
|
||||
API for doing so.
|
||||
|
||||
This is not an official MaxMind API.
|
||||
|
||||
## Installation ##
|
||||
|
||||
```
|
||||
go get github.com/oschwald/maxminddb-golang
|
||||
```
|
||||
|
||||
## Usage ##
|
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
|
||||
documentation and examples.
|
||||
|
||||
## Examples ##
|
||||
|
||||
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
|
||||
`example_test.go` for examples.
|
||||
|
||||
## Contributing ##
|
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request
|
||||
with your changes.
|
||||
|
||||
## License ##
|
||||
|
||||
This is free software, licensed under the ISC License.
|
|
@ -0,0 +1,19 @@
|
|||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\oschwald\maxminddb-golang
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- git submodule update --init --recursive
|
||||
- go version
|
||||
- go env
|
||||
- go get -v -t ./...
|
||||
|
||||
build_script:
|
||||
- go test -v ./...
|
|
@ -0,0 +1,721 @@
|
|||
package maxminddb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
type dataType int
|
||||
|
||||
const (
|
||||
_Extended dataType = iota
|
||||
_Pointer
|
||||
_String
|
||||
_Float64
|
||||
_Bytes
|
||||
_Uint16
|
||||
_Uint32
|
||||
_Map
|
||||
_Int32
|
||||
_Uint64
|
||||
_Uint128
|
||||
_Slice
|
||||
_Container
|
||||
_Marker
|
||||
_Bool
|
||||
_Float32
|
||||
)
|
||||
|
||||
const (
|
||||
// This is the value used in libmaxminddb
|
||||
maximumDataStructureDepth = 512
|
||||
)
|
||||
|
||||
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
|
||||
if depth > maximumDataStructureDepth {
|
||||
return 0, newInvalidDatabaseError("exceeded maximum data structure depth; database is likely corrupt")
|
||||
}
|
||||
typeNum, size, newOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
|
||||
result.Set(reflect.ValueOf(uintptr(offset)))
|
||||
return d.nextValueOffset(offset, 1)
|
||||
}
|
||||
return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
|
||||
}
|
||||
|
||||
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
|
||||
newOffset := offset + 1
|
||||
if offset >= uint(len(d.buffer)) {
|
||||
return 0, 0, 0, newOffsetError()
|
||||
}
|
||||
ctrlByte := d.buffer[offset]
|
||||
|
||||
typeNum := dataType(ctrlByte >> 5)
|
||||
if typeNum == _Extended {
|
||||
if newOffset >= uint(len(d.buffer)) {
|
||||
return 0, 0, 0, newOffsetError()
|
||||
}
|
||||
typeNum = dataType(d.buffer[newOffset] + 7)
|
||||
newOffset++
|
||||
}
|
||||
|
||||
var size uint
|
||||
size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
|
||||
return typeNum, size, newOffset, err
|
||||
}
|
||||
|
||||
func (d *decoder) sizeFromCtrlByte(ctrlByte byte, offset uint, typeNum dataType) (uint, uint, error) {
|
||||
size := uint(ctrlByte & 0x1f)
|
||||
if typeNum == _Extended {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
var bytesToRead uint
|
||||
if size < 29 {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
bytesToRead = size - 28
|
||||
newOffset := offset + bytesToRead
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return 0, 0, newOffsetError()
|
||||
}
|
||||
if size == 29 {
|
||||
return 29 + uint(d.buffer[offset]), offset + 1, nil
|
||||
}
|
||||
|
||||
sizeBytes := d.buffer[offset:newOffset]
|
||||
|
||||
switch {
|
||||
case size == 30:
|
||||
size = 285 + uintFromBytes(0, sizeBytes)
|
||||
case size > 30:
|
||||
size = uintFromBytes(0, sizeBytes) + 65821
|
||||
}
|
||||
return size, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFromType(
|
||||
dtype dataType,
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result = d.indirect(result)
|
||||
|
||||
// For these types, size has a special meaning
|
||||
switch dtype {
|
||||
case _Bool:
|
||||
return d.unmarshalBool(size, offset, result)
|
||||
case _Map:
|
||||
return d.unmarshalMap(size, offset, result, depth)
|
||||
case _Pointer:
|
||||
return d.unmarshalPointer(size, offset, result, depth)
|
||||
case _Slice:
|
||||
return d.unmarshalSlice(size, offset, result, depth)
|
||||
}
|
||||
|
||||
// For the remaining types, size is the byte size
|
||||
if offset+size > uint(len(d.buffer)) {
|
||||
return 0, newOffsetError()
|
||||
}
|
||||
switch dtype {
|
||||
case _Bytes:
|
||||
return d.unmarshalBytes(size, offset, result)
|
||||
case _Float32:
|
||||
return d.unmarshalFloat32(size, offset, result)
|
||||
case _Float64:
|
||||
return d.unmarshalFloat64(size, offset, result)
|
||||
case _Int32:
|
||||
return d.unmarshalInt32(size, offset, result)
|
||||
case _String:
|
||||
return d.unmarshalString(size, offset, result)
|
||||
case _Uint16:
|
||||
return d.unmarshalUint(size, offset, result, 16)
|
||||
case _Uint32:
|
||||
return d.unmarshalUint(size, offset, result, 32)
|
||||
case _Uint64:
|
||||
return d.unmarshalUint(size, offset, result, 64)
|
||||
case _Uint128:
|
||||
return d.unmarshalUint128(size, offset, result)
|
||||
default:
|
||||
return 0, newInvalidDatabaseError("unknown type: %d", dtype)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 1 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeBool(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.Bool:
|
||||
result.SetBool(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
// indirect follows pointers and create values as necessary. This is
|
||||
// heavily based on encoding/json as my original version had a subtle
|
||||
// bug. This method should be considered to be licensed under
|
||||
// https://golang.org/LICENSE
|
||||
func (d *decoder) indirect(result reflect.Value) reflect.Value {
|
||||
for {
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if result.Kind() == reflect.Interface && !result.IsNil() {
|
||||
e := result.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() {
|
||||
result = e
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if result.Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
|
||||
if result.IsNil() {
|
||||
result.Set(reflect.New(result.Type().Elem()))
|
||||
}
|
||||
result = result.Elem()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
var sliceType = reflect.TypeOf([]byte{})
|
||||
|
||||
func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset, err := d.decodeBytes(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.Slice:
|
||||
if result.Type() == sliceType {
|
||||
result.SetBytes(value)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size != 4 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeFloat32(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
result.SetFloat(float64(value))
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
|
||||
if size != 8 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeFloat64(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if result.OverflowFloat(value) {
|
||||
return 0, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
result.SetFloat(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 4 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeInt(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n := int64(value)
|
||||
if !result.OverflowInt(n) {
|
||||
result.SetInt(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
n := uint64(value)
|
||||
if !result.OverflowUint(n) {
|
||||
result.SetUint(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalMap(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result = d.indirect(result)
|
||||
switch result.Kind() {
|
||||
default:
|
||||
return 0, newUnmarshalTypeError("map", result.Type())
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(size, offset, result, depth)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(size, offset, result, depth)
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
rv := reflect.ValueOf(make(map[string]interface{}, size))
|
||||
newOffset, err := d.decodeMap(size, offset, rv, depth)
|
||||
result.Set(rv)
|
||||
return newOffset, err
|
||||
}
|
||||
return 0, newUnmarshalTypeError("map", result.Type())
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalPointer(size uint, offset uint, result reflect.Value, depth int) (uint, error) {
|
||||
pointer, newOffset, err := d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = d.decode(pointer, result, depth)
|
||||
return newOffset, err
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
switch result.Kind() {
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(size, offset, result, depth)
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
a := []interface{}{}
|
||||
rv := reflect.ValueOf(&a).Elem()
|
||||
newOffset, err := d.decodeSlice(size, offset, rv, depth)
|
||||
result.Set(rv)
|
||||
return newOffset, err
|
||||
}
|
||||
}
|
||||
return 0, newUnmarshalTypeError("array", result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset, err := d.decodeString(size, offset)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.String:
|
||||
result.SetString(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, uintType uint) (uint, error) {
|
||||
if size > uintType/8 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
|
||||
}
|
||||
|
||||
value, newOffset, err := d.decodeUint(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n := int64(value)
|
||||
if !result.OverflowInt(n) {
|
||||
result.SetInt(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
if !result.OverflowUint(value) {
|
||||
result.SetUint(value)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
var bigIntType = reflect.TypeOf(big.Int{})
|
||||
|
||||
func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 16 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeUint128(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Struct:
|
||||
if result.Type() == bigIntType {
|
||||
result.Set(reflect.ValueOf(*value))
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
// decodeBool interprets the control-byte size field as the boolean value
// itself; booleans occupy no payload bytes, so offset is returned unchanged.
func (d *decoder) decodeBool(size uint, offset uint) (bool, uint, error) {
	return size != 0, offset, nil
}
|
||||
|
||||
func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint, error) {
|
||||
newOffset := offset + size
|
||||
bytes := make([]byte, size)
|
||||
copy(bytes, d.buffer[offset:newOffset])
|
||||
return bytes, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint, error) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
|
||||
return math.Float64frombits(bits), newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint, error) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
|
||||
return math.Float32frombits(bits), newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(size uint, offset uint) (int, uint, error) {
|
||||
newOffset := offset + size
|
||||
var val int32
|
||||
for _, b := range d.buffer[offset:newOffset] {
|
||||
val = (val << 8) | int32(b)
|
||||
}
|
||||
return int(val), newOffset, nil
|
||||
}
|
||||
|
||||
// decodeMap reads size key/value pairs starting at offset into the map held
// by result, allocating the map first if it is nil. Returns the offset just
// past the last pair.
func (d *decoder) decodeMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	if result.IsNil() {
		result.Set(reflect.MakeMap(result.Type()))
	}

	for i := uint(0); i < size; i++ {
		var key []byte
		var err error
		// Map keys are always strings in the MaxMind DB format.
		key, offset, err = d.decodeKey(offset)
		if err != nil {
			return 0, err
		}

		// Decode the value into a fresh element of the map's value type.
		value := reflect.New(result.Type().Elem())
		offset, err = d.decode(offset, value, depth)
		if err != nil {
			return 0, err
		}
		result.SetMapIndex(reflect.ValueOf(string(key)), value.Elem())
	}
	return offset, nil
}
|
||||
|
||||
// decodePointer resolves a pointer record. The control byte's bits 3-4
// (within size) select a 1-4 byte pointer width; for widths 1-3 the low
// three bits contribute prefix bits, and widths 2 and 3 additionally add a
// fixed bias per the MaxMind DB spec. Returns the pointed-to offset and the
// offset just past the pointer bytes.
func (d *decoder) decodePointer(
	size uint,
	offset uint,
) (uint, uint, error) {
	pointerSize := ((size >> 3) & 0x3) + 1
	newOffset := offset + pointerSize
	if newOffset > uint(len(d.buffer)) {
		return 0, 0, newOffsetError()
	}
	pointerBytes := d.buffer[offset:newOffset]
	var prefix uint
	if pointerSize == 4 {
		// 4-byte pointers do not reuse the control byte's low bits.
		prefix = 0
	} else {
		prefix = uint(size & 0x7)
	}
	unpacked := uintFromBytes(prefix, pointerBytes)

	// Spec-defined bias added per pointer width so the ranges don't overlap.
	var pointerValueOffset uint
	switch pointerSize {
	case 1:
		pointerValueOffset = 0
	case 2:
		pointerValueOffset = 2048
	case 3:
		pointerValueOffset = 526336
	case 4:
		pointerValueOffset = 0
	}

	pointer := unpacked + pointerValueOffset

	return pointer, newOffset, nil
}
|
||||
|
||||
func (d *decoder) decodeSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
|
||||
for i := 0; i < int(size); i++ {
|
||||
var err error
|
||||
offset, err = d.decode(offset, result.Index(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(size uint, offset uint) (string, uint, error) {
|
||||
newOffset := offset + size
|
||||
return string(d.buffer[offset:newOffset]), newOffset, nil
|
||||
}
|
||||
|
||||
// fieldsType caches, for one struct type, which field index each maxminddb
// key maps to, plus the indexes of embedded (anonymous) fields that must be
// filled with the whole map.
type fieldsType struct {
	namedFields     map[string]int
	anonymousFields []int
}
|
||||
|
||||
// fieldMap caches decodeStruct's per-type field layout, keyed by
// reflect.Type; fieldMapMu guards it so concurrent decodes are safe.
var (
	fieldMap   = map[reflect.Type]*fieldsType{}
	fieldMapMu sync.RWMutex
)
|
||||
|
||||
// decodeStruct reads size key/value pairs starting at offset into the struct
// result. The field lookup table is computed once per struct type and cached
// in fieldMap. Keys that match no field are skipped without decoding.
// Returns the offset just past the last pair.
func (d *decoder) decodeStruct(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	resultType := result.Type()

	fieldMapMu.RLock()
	fields, ok := fieldMap[resultType]
	fieldMapMu.RUnlock()
	if !ok {
		// Build the lookup table: a `maxminddb` tag overrides the field
		// name, "-" excludes the field entirely, and anonymous fields are
		// recorded separately so embedded structs can be filled below.
		numFields := resultType.NumField()
		namedFields := make(map[string]int, numFields)
		var anonymous []int
		for i := 0; i < numFields; i++ {
			field := resultType.Field(i)

			fieldName := field.Name
			if tag := field.Tag.Get("maxminddb"); tag != "" {
				if tag == "-" {
					continue
				}
				fieldName = tag
			}
			if field.Anonymous {
				anonymous = append(anonymous, i)
				continue
			}
			namedFields[fieldName] = i
		}
		// Racing goroutines may build the table twice; last write wins,
		// which is harmless since the contents are identical.
		fieldMapMu.Lock()
		fields = &fieldsType{namedFields, anonymous}
		fieldMap[resultType] = fields
		fieldMapMu.Unlock()
	}

	// This fills in embedded structs
	for _, i := range fields.anonymousFields {
		_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
		if err != nil {
			return 0, err
		}
	}

	// This handles named fields
	for i := uint(0); i < size; i++ {
		var (
			err error
			key []byte
		)
		key, offset, err = d.decodeKey(offset)
		if err != nil {
			return 0, err
		}
		// The string() does not create a copy due to this compiler
		// optimization: https://github.com/golang/go/issues/3512
		j, ok := fields.namedFields[string(key)]
		if !ok {
			// Unknown key: skip exactly one value without decoding it.
			offset, err = d.nextValueOffset(offset, 1)
			if err != nil {
				return 0, err
			}
			continue
		}

		offset, err = d.decode(offset, result.Field(j), depth)
		if err != nil {
			return 0, err
		}
	}
	return offset, nil
}
|
||||
|
||||
func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
|
||||
newOffset := offset + size
|
||||
bytes := d.buffer[offset:newOffset]
|
||||
|
||||
var val uint64
|
||||
for _, b := range bytes {
|
||||
val = (val << 8) | uint64(b)
|
||||
}
|
||||
return val, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint, error) {
|
||||
newOffset := offset + size
|
||||
val := new(big.Int)
|
||||
val.SetBytes(d.buffer[offset:newOffset])
|
||||
|
||||
return val, newOffset, nil
|
||||
}
|
||||
|
||||
// uintFromBytes folds big-endian bytes into an unsigned integer whose high
// bits are seeded with prefix.
func uintFromBytes(prefix uint, uintBytes []byte) uint {
	acc := prefix
	for _, octet := range uintBytes {
		acc = acc<<8 | uint(octet)
	}
	return acc
}
|
||||
|
||||
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
// copying the bytes when decoding a struct. Previously, we achieved this by
// using unsafe.
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
	typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return nil, 0, err
	}
	if typeNum == _Pointer {
		// Keys may be stored behind a pointer; follow it for the bytes but
		// resume reading just after the pointer record itself.
		pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
		if err != nil {
			return nil, 0, err
		}
		key, _, err := d.decodeKey(pointer)
		return key, ptrOffset, err
	}
	if typeNum != _String {
		return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
	}
	newOffset := dataOffset + size
	if newOffset > uint(len(d.buffer)) {
		return nil, 0, newOffsetError()
	}
	// Return a sub-slice of the buffer directly — no copy.
	return d.buffer[dataOffset:newOffset], newOffset, nil
}
|
||||
|
||||
// This function is used to skip ahead to the next value without decoding
// the one at the offset passed in. The size bits have different meanings for
// different data types
func (d *decoder) nextValueOffset(offset uint, numberToSkip uint) (uint, error) {
	if numberToSkip == 0 {
		return offset, nil
	}
	typeNum, size, offset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}
	switch typeNum {
	case _Pointer:
		// A pointer's payload is just the pointer bytes; skip them.
		_, offset, err = d.decodePointer(size, offset)
		if err != nil {
			return 0, err
		}
	case _Map:
		// Each map entry contributes a key and a value to skip.
		numberToSkip += 2 * size
	case _Slice:
		numberToSkip += size
	case _Bool:
		// Booleans carry their value in the control byte; no payload.
	default:
		// All other types store size payload bytes after the control data.
		offset += size
	}
	return d.nextValueOffset(offset, numberToSkip-1)
}
|
|
@ -0,0 +1,42 @@
|
|||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
	message string // human-readable description of the corruption
}
|
||||
|
||||
func newOffsetError() InvalidDatabaseError {
|
||||
return InvalidDatabaseError{"unexpected end of database"}
|
||||
}
|
||||
|
||||
func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
|
||||
return InvalidDatabaseError{fmt.Sprintf(format, args...)}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
func (e InvalidDatabaseError) Error() string {
	return e.message
}
|
||||
|
||||
// UnmarshalTypeError is returned when the value in the database cannot be
// assigned to the specified data type.
type UnmarshalTypeError struct {
	Value string       // stringified copy of the database value that caused the error
	Type  reflect.Type // type of the value that could not be assigned to
}
|
||||
|
||||
func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
|
||||
return UnmarshalTypeError{
|
||||
Value: fmt.Sprintf("%v", value),
|
||||
Type: rType,
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
func (e UnmarshalTypeError) Error() string {
	return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
}
|
|
@ -0,0 +1,15 @@
|
|||
// +build !windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// mmap maps length bytes of the file referred to by fd into memory
// read-only, shared with other processes mapping the same file.
func mmap(fd int, length int) (data []byte, err error) {
	return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
}
|
||||
|
||||
// munmap releases a mapping created by mmap.
func munmap(b []byte) (err error) {
	return unix.Munmap(b)
}
|
|
@ -0,0 +1,85 @@
|
|||
// +build windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
// Windows support largely borrowed from mmap-go.
|
||||
//
|
||||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type memoryMap []byte
|
||||
|
||||
// Windows keeps a separate file-mapping handle per view; handleMap records
// that handle keyed by the view's base address so munmap can close it.
// Guarded by handleLock.
var handleLock sync.Mutex
var handleMap = map[uintptr]windows.Handle{}
|
||||
|
||||
// mmap maps length bytes of the file referred to by fd into memory
// read-only using CreateFileMapping/MapViewOfFile, recording the mapping
// handle so munmap can later release it.
func mmap(fd int, length int) (data []byte, err error) {
	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
	if h == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}

	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
		0, uintptr(length))
	if addr == 0 {
		return nil, os.NewSyscallError("MapViewOfFile", errno)
	}
	// Remember the handle by base address for munmap.
	handleLock.Lock()
	handleMap[addr] = h
	handleLock.Unlock()

	// Point a []byte header directly at the mapped view.
	m := memoryMap{}
	dh := m.header()
	dh.Data = addr
	dh.Len = length
	dh.Cap = dh.Len

	return m, nil
}
|
||||
|
||||
// header reinterprets the memoryMap as a slice header so its Data/Len/Cap
// fields can be pointed at the mapped view.
func (m *memoryMap) header() *reflect.SliceHeader {
	return (*reflect.SliceHeader)(unsafe.Pointer(m))
}
|
||||
|
||||
// flush writes any dirty pages of the view at addr back to the file.
// os.NewSyscallError returns nil when errno is nil.
func flush(addr, len uintptr) error {
	errno := windows.FlushViewOfFile(addr, len)
	return os.NewSyscallError("FlushViewOfFile", errno)
}
|
||||
|
||||
func munmap(b []byte) (err error) {
|
||||
m := memoryMap(b)
|
||||
dh := m.header()
|
||||
|
||||
addr := dh.Data
|
||||
length := uintptr(dh.Len)
|
||||
|
||||
flush(addr, length)
|
||||
err = windows.UnmapViewOfFile(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handleLock.Lock()
|
||||
defer handleLock.Unlock()
|
||||
handle, ok := handleMap[addr]
|
||||
if !ok {
|
||||
// should be impossible; we would've errored above
|
||||
return errors.New("unknown base address")
|
||||
}
|
||||
delete(handleMap, addr)
|
||||
|
||||
e := windows.CloseHandle(windows.Handle(handle))
|
||||
return os.NewSyscallError("CloseHandle", e)
|
||||
}
|
|
@ -0,0 +1,259 @@
|
|||
package maxminddb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
	// NotFound is returned by LookupOffset when a matched root record offset
	// cannot be found.
	NotFound = ^uintptr(0)

	// dataSectionSeparatorSize is the number of bytes separating the search
	// tree from the data section in the file.
	dataSectionSeparatorSize = 16
)
|
||||
|
||||
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
|
||||
|
||||
// Reader holds the data corresponding to the MaxMind DB file. Its only public
// field is Metadata, which contains the metadata from the MaxMind DB file.
type Reader struct {
	hasMappedFile bool    // true when buffer is an OS memory map Close must release
	buffer        []byte  // entire database file; nil once closed
	decoder       decoder // decoder positioned over the data section
	Metadata      Metadata
	ipv4Start     uint // tree node where IPv4 lookups begin (0 for IPv4 databases)
}
|
||||
|
||||
// Metadata holds the metadata decoded from the MaxMind DB file. In particular
// it has the format version, the build time as Unix epoch time, the database
// type and description, the IP version supported, and a slice of the natural
// languages included.
type Metadata struct {
	BinaryFormatMajorVersion uint              `maxminddb:"binary_format_major_version"`
	BinaryFormatMinorVersion uint              `maxminddb:"binary_format_minor_version"`
	BuildEpoch               uint              `maxminddb:"build_epoch"`
	DatabaseType             string            `maxminddb:"database_type"`
	Description              map[string]string `maxminddb:"description"`
	IPVersion                uint              `maxminddb:"ip_version"`
	Languages                []string          `maxminddb:"languages"`
	NodeCount                uint              `maxminddb:"node_count"`
	RecordSize               uint              `maxminddb:"record_size"`
}
|
||||
|
||||
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
|
||||
// a Reader structure or an error.
|
||||
func FromBytes(buffer []byte) (*Reader, error) {
|
||||
metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
|
||||
|
||||
if metadataStart == -1 {
|
||||
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
|
||||
}
|
||||
|
||||
metadataStart += len(metadataStartMarker)
|
||||
metadataDecoder := decoder{buffer[metadataStart:]}
|
||||
|
||||
var metadata Metadata
|
||||
|
||||
rvMetdata := reflect.ValueOf(&metadata)
|
||||
_, err := metadataDecoder.decode(0, rvMetdata, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
|
||||
dataSectionStart := searchTreeSize + dataSectionSeparatorSize
|
||||
dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
|
||||
if dataSectionStart > dataSectionEnd {
|
||||
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
|
||||
}
|
||||
d := decoder{
|
||||
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
|
||||
}
|
||||
|
||||
reader := &Reader{
|
||||
buffer: buffer,
|
||||
decoder: d,
|
||||
Metadata: metadata,
|
||||
ipv4Start: 0,
|
||||
}
|
||||
|
||||
reader.ipv4Start, err = reader.startNode()
|
||||
|
||||
return reader, err
|
||||
}
|
||||
|
||||
func (r *Reader) startNode() (uint, error) {
|
||||
if r.Metadata.IPVersion != 6 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
node := uint(0)
|
||||
var err error
|
||||
for i := 0; i < 96 && node < nodeCount; i++ {
|
||||
node, err = r.readNode(node, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return node, err
|
||||
}
|
||||
|
||||
// Lookup takes an IP address as a net.IP structure and a pointer to the
|
||||
// result value to Decode into.
|
||||
func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
|
||||
if r.buffer == nil {
|
||||
return errors.New("cannot call Lookup on a closed database")
|
||||
}
|
||||
pointer, err := r.lookupPointer(ipAddress)
|
||||
if pointer == 0 || err != nil {
|
||||
return err
|
||||
}
|
||||
return r.retrieveData(pointer, result)
|
||||
}
|
||||
|
||||
// LookupOffset maps an argument net.IP to a corresponding record offset in the
|
||||
// database. NotFound is returned if no such record is found, and a record may
|
||||
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
|
||||
// is an advanced API, which exists to provide clients with a means to cache
|
||||
// previously-decoded records.
|
||||
func (r *Reader) LookupOffset(ipAddress net.IP) (uintptr, error) {
|
||||
if r.buffer == nil {
|
||||
return 0, errors.New("cannot call LookupOffset on a closed database")
|
||||
}
|
||||
pointer, err := r.lookupPointer(ipAddress)
|
||||
if pointer == 0 || err != nil {
|
||||
return NotFound, err
|
||||
}
|
||||
return r.resolveDataPointer(pointer)
|
||||
}
|
||||
|
||||
// Decode the record at |offset| into |result|. The result value pointed to
// must be a data value that corresponds to a record in the database. This may
// include a struct representation of the data, a map capable of holding the
// data or an empty interface{} value.
//
// If result is a pointer to a struct, the struct need not include a field
// for every value that may be in the database. If a field is not present in
// the structure, the decoder will not decode that field, reducing the time
// required to decode the record.
//
// As a special case, a struct field of type uintptr will be used to capture
// the offset of the value. Decode may later be used to extract the stored
// value from the offset. MaxMind DBs are highly normalized: for example in
// the City database, all records of the same country will reference a
// single representative record for that country. This uintptr behavior allows
// clients to leverage this normalization in their own sub-record caching.
func (r *Reader) Decode(offset uintptr, result interface{}) error {
	if r.buffer == nil {
		return errors.New("cannot call Decode on a closed database")
	}
	return r.decode(offset, result)
}
|
||||
|
||||
func (r *Reader) decode(offset uintptr, result interface{}) error {
|
||||
rv := reflect.ValueOf(result)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return errors.New("result param must be a pointer")
|
||||
}
|
||||
|
||||
_, err := r.decoder.decode(uint(offset), reflect.ValueOf(result), 0)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
|
||||
if ipAddress == nil {
|
||||
return 0, errors.New("ipAddress passed to Lookup cannot be nil")
|
||||
}
|
||||
|
||||
ipV4Address := ipAddress.To4()
|
||||
if ipV4Address != nil {
|
||||
ipAddress = ipV4Address
|
||||
}
|
||||
if len(ipAddress) == 16 && r.Metadata.IPVersion == 4 {
|
||||
return 0, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
|
||||
}
|
||||
|
||||
return r.findAddressInTree(ipAddress)
|
||||
}
|
||||
|
||||
// findAddressInTree walks the binary search tree one bit of ipAddress at a
// time and returns the record value for the matching network, or 0 when the
// address is not present in the database.
func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {

	bitCount := uint(len(ipAddress) * 8)

	// IPv4 lookups in an IPv6 tree start at the precomputed ::/96 node
	// rather than the root.
	var node uint
	if bitCount == 32 {
		node = r.ipv4Start
	}

	nodeCount := r.Metadata.NodeCount

	for i := uint(0); i < bitCount && node < nodeCount; i++ {
		// Take bit i of the address, most-significant first.
		bit := uint(1) & (uint(ipAddress[i>>3]) >> (7 - (i % 8)))

		var err error
		node, err = r.readNode(node, bit)
		if err != nil {
			return 0, err
		}
	}
	if node == nodeCount {
		// Record is empty
		return 0, nil
	} else if node > nodeCount {
		// Values above nodeCount point into the data section.
		return node, nil
	}

	return 0, newInvalidDatabaseError("invalid node in search tree")
}
|
||||
|
||||
func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
|
||||
RecordSize := r.Metadata.RecordSize
|
||||
|
||||
baseOffset := nodeNumber * RecordSize / 4
|
||||
|
||||
var nodeBytes []byte
|
||||
var prefix uint
|
||||
switch RecordSize {
|
||||
case 24:
|
||||
offset := baseOffset + index*3
|
||||
nodeBytes = r.buffer[offset : offset+3]
|
||||
case 28:
|
||||
prefix = uint(r.buffer[baseOffset+3])
|
||||
if index != 0 {
|
||||
prefix &= 0x0F
|
||||
} else {
|
||||
prefix = (0xF0 & prefix) >> 4
|
||||
}
|
||||
offset := baseOffset + index*4
|
||||
nodeBytes = r.buffer[offset : offset+3]
|
||||
case 32:
|
||||
offset := baseOffset + index*4
|
||||
nodeBytes = r.buffer[offset : offset+4]
|
||||
default:
|
||||
return 0, newInvalidDatabaseError("unknown record size: %d", RecordSize)
|
||||
}
|
||||
return uintFromBytes(prefix, nodeBytes), nil
|
||||
}
|
||||
|
||||
func (r *Reader) retrieveData(pointer uint, result interface{}) error {
|
||||
offset, err := r.resolveDataPointer(pointer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.decode(offset, result)
|
||||
}
|
||||
|
||||
// resolveDataPointer converts a search-tree record value into an offset
// within the data section, validating that it falls inside the buffer.
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
	var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)

	if resolved > uintptr(len(r.buffer)) {
		return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
	}
	return resolved, nil
}
|
|
@ -0,0 +1,28 @@
|
|||
// +build appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map,
|
||||
// except on Google App Engine where mmap is not supported; there the database
|
||||
// is loaded into memory. Use the Close method on the Reader object to return
|
||||
// the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
bytes, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return FromBytes(bytes)
|
||||
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method sets the underlying buffer
// to nil, returning the resources to the system.
func (r *Reader) Close() error {
	// No mmap on App Engine: dropping the buffer is all there is to do.
	r.buffer = nil
	return nil
}
|
|
@ -0,0 +1,63 @@
|
|||
// +build !appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map,
|
||||
// except on Google App Engine where mmap is not supported; there the database
|
||||
// is loaded into memory. Use the Close method on the Reader object to return
|
||||
// the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
mapFile, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if rerr := mapFile.Close(); rerr != nil {
|
||||
err = rerr
|
||||
}
|
||||
}()
|
||||
|
||||
stats, err := mapFile.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSize := int(stats.Size())
|
||||
mmap, err := mmap(int(mapFile.Fd()), fileSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := FromBytes(mmap)
|
||||
if err != nil {
|
||||
if err2 := munmap(mmap); err2 != nil {
|
||||
// failing to unmap the file is probably the more severe error
|
||||
return nil, err2
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader.hasMappedFile = true
|
||||
runtime.SetFinalizer(reader, (*Reader).Close)
|
||||
return reader, err
|
||||
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method does nothing.
func (r *Reader) Close() error {
	var err error
	if r.hasMappedFile {
		// Remove the finalizer installed by Open so it cannot run twice.
		runtime.SetFinalizer(r, nil)
		r.hasMappedFile = false
		err = munmap(r.buffer)
	}
	r.buffer = nil
	return err
}
|
|
@ -0,0 +1,108 @@
|
|||
package maxminddb
|
||||
|
||||
import "net"
|
||||
|
||||
// Internal structure used to keep track of nodes we still need to visit.
type netNode struct {
	ip      net.IP
	bit     uint // number of prefix bits consumed to reach this node
	pointer uint // search-tree node number (or data pointer) for this subtree
}
|
||||
|
||||
// Networks represents a set of subnets that we are iterating over.
type Networks struct {
	reader   *Reader
	nodes    []netNode // Nodes we still have to visit.
	lastNode netNode   // node most recently yielded by Next
	err      error     // first error encountered; reported by Err
}
|
||||
|
||||
// Networks returns an iterator that can be used to traverse all networks in
// the database.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in an IPv6 database. This iterator will iterate over all of these
// locations separately.
func (r *Reader) Networks() *Networks {
	// The root IP width matches the database's IP version.
	s := 4
	if r.Metadata.IPVersion == 6 {
		s = 16
	}
	return &Networks{
		reader: r,
		nodes: []netNode{
			{
				ip: make(net.IP, s),
			},
		},
	}
}
|
||||
|
||||
// Next prepares the next network for reading with the Network method. It
// returns true if there is another network to be processed and false if there
// are no more networks or if there is an error.
func (n *Networks) Next() bool {
	for len(n.nodes) > 0 {
		// Pop the most recently pushed subtree (depth-first traversal).
		node := n.nodes[len(n.nodes)-1]
		n.nodes = n.nodes[:len(n.nodes)-1]

		for {
			if node.pointer < n.reader.Metadata.NodeCount {
				// Interior node: push the right child for later and
				// keep descending left.
				ipRight := make(net.IP, len(node.ip))
				copy(ipRight, node.ip)
				if len(ipRight) <= int(node.bit>>3) {
					n.err = newInvalidDatabaseError(
						"invalid search tree at %v/%v", ipRight, node.bit)
					return false
				}
				// Set the bit distinguishing the right subtree's prefix.
				ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

				rightPointer, err := n.reader.readNode(node.pointer, 1)
				if err != nil {
					n.err = err
					return false
				}

				node.bit++
				n.nodes = append(n.nodes, netNode{
					pointer: rightPointer,
					ip:      ipRight,
					bit:     node.bit,
				})

				node.pointer, err = n.reader.readNode(node.pointer, 0)
				if err != nil {
					n.err = err
					return false
				}

			} else if node.pointer > n.reader.Metadata.NodeCount {
				// Data pointer: this subtree is a network with a record.
				n.lastNode = node
				return true
			} else {
				// pointer == NodeCount marks an empty subtree.
				break
			}
		}
	}

	return false
}
|
||||
|
||||
// Network returns the current network or an error if there is a problem
// decoding the data for the network. It takes a pointer to a result value to
// decode the network's data into.
func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
	if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
		return nil, err
	}

	// bit counts the prefix bits consumed while descending the tree, i.e.
	// the network's prefix length.
	return &net.IPNet{
		IP:   n.lastNode.ip,
		Mask: net.CIDRMask(int(n.lastNode.bit), len(n.lastNode.ip)*8),
	}, nil
}
|
||||
|
||||
// Err returns an error, if any, that was encountered during iteration.
func (n *Networks) Err() error {
	return n.err
}
|
|
@ -0,0 +1,185 @@
|
|||
package maxminddb

import "reflect"

// verifier wraps a Reader and performs strict consistency checks on the
// underlying MaxMind DB database.
type verifier struct {
	reader *Reader
}
|
||||
|
||||
// Verify checks that the database is valid. It validates the search tree,
|
||||
// the data section, and the metadata section. This verifier is stricter than
|
||||
// the specification and may return errors on databases that are readable.
|
||||
func (r *Reader) Verify() error {
|
||||
v := verifier{r}
|
||||
if err := v.verifyMetadata(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDatabase()
|
||||
}
|
||||
|
||||
func (v *verifier) verifyMetadata() error {
|
||||
metadata := v.reader.Metadata
|
||||
|
||||
if metadata.BinaryFormatMajorVersion != 2 {
|
||||
return testError(
|
||||
"binary_format_major_version",
|
||||
2,
|
||||
metadata.BinaryFormatMajorVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.BinaryFormatMinorVersion != 0 {
|
||||
return testError(
|
||||
"binary_format_minor_version",
|
||||
0,
|
||||
metadata.BinaryFormatMinorVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.DatabaseType == "" {
|
||||
return testError(
|
||||
"database_type",
|
||||
"non-empty string",
|
||||
metadata.DatabaseType,
|
||||
)
|
||||
}
|
||||
|
||||
if len(metadata.Description) == 0 {
|
||||
return testError(
|
||||
"description",
|
||||
"non-empty slice",
|
||||
metadata.Description,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
|
||||
return testError(
|
||||
"ip_version",
|
||||
"4 or 6",
|
||||
metadata.IPVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.RecordSize != 24 &&
|
||||
metadata.RecordSize != 28 &&
|
||||
metadata.RecordSize != 32 {
|
||||
return testError(
|
||||
"record_size",
|
||||
"24, 28, or 32",
|
||||
metadata.RecordSize,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.NodeCount == 0 {
|
||||
return testError(
|
||||
"node_count",
|
||||
"positive integer",
|
||||
metadata.NodeCount,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDatabase() error {
|
||||
offsets, err := v.verifySearchTree()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := v.verifyDataSectionSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDataSection(offsets)
|
||||
}
|
||||
|
||||
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
|
||||
offsets := make(map[uint]bool)
|
||||
|
||||
it := v.reader.Networks()
|
||||
for it.Next() {
|
||||
offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offsets[uint(offset)] = true
|
||||
}
|
||||
if err := it.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDataSectionSeparator() error {
|
||||
separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
|
||||
|
||||
separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
|
||||
|
||||
for _, b := range separator {
|
||||
if b != 0 {
|
||||
return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyDataSection decodes every value in the data section and checks that
// the set of offsets actually decoded exactly matches the set of offsets
// referenced by the search tree (passed in via offsets).
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
	pointerCount := len(offsets)

	decoder := v.reader.decoder

	var offset uint
	bufferLen := uint(len(decoder.buffer))
	for offset < bufferLen {
		var data interface{}
		rv := reflect.ValueOf(&data)
		// Decode whatever value starts at this offset; a decode failure
		// means the data section itself is corrupt.
		newOffset, err := decoder.decode(offset, rv, 0)
		if err != nil {
			return newInvalidDatabaseError("received decoding error (%v) at offset of %v", err, offset)
		}
		// Offsets must advance strictly; anything else indicates a
		// zero-length or backwards record.
		if newOffset <= offset {
			return newInvalidDatabaseError("data section offset unexpectedly went from %v to %v", offset, newOffset)
		}

		pointer := offset

		// Every decoded record must be reachable from the search tree;
		// remove it from the pending set so leftovers can be detected below.
		if _, ok := offsets[pointer]; ok {
			delete(offsets, pointer)
		} else {
			return newInvalidDatabaseError("found data (%v) at %v that the search tree does not point to", data, pointer)
		}

		offset = newOffset
	}

	// The final record must end exactly at the end of the buffer.
	if offset != bufferLen {
		return newInvalidDatabaseError(
			"unexpected data at the end of the data section (last offset: %v, end: %v)",
			offset,
			bufferLen,
		)
	}

	// Any offsets still pending were referenced by the tree but never seen
	// while scanning the data section.
	if len(offsets) != 0 {
		return newInvalidDatabaseError(
			"found %v pointers (of %v) in the search tree that we did not see in the data section",
			len(offsets),
			pointerCount,
		)
	}
	return nil
}
|
||||
|
||||
func testError(
|
||||
field string,
|
||||
expected interface{},
|
||||
actual interface{},
|
||||
) error {
|
||||
return newInvalidDatabaseError(
|
||||
"%v - Expected: %v Actual: %v",
|
||||
field,
|
||||
expected,
|
||||
actual,
|
||||
)
|
||||
}
|
|
@ -20,6 +20,11 @@
|
|||
"revision": "371fbbdaa8987b715bdd21d6adc4c9b20155f748",
|
||||
"revisionTime": "2016-08-11T21:22:31Z"
|
||||
},
|
||||
{
|
||||
"path": "git",
|
||||
"revision": "github.com:apilayer/freegeoip.git",
|
||||
"version": "github.com:apilayer/freegeoip.git"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "z+M6FYl9EKsoZZMLcT0Ktwfk8pI=",
|
||||
"path": "github.com/Azure/azure-pipeline-go/pipeline",
|
||||
|
@ -50,6 +55,12 @@
|
|||
"revision": "e24eb225f15679bbe54f91bfa7da3b00e59b9768",
|
||||
"revisionTime": "2019-02-18T06:46:05Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "hp2pna9yEn9hemIjc7asalxL2Qs=",
|
||||
"path": "github.com/apilayer/freegeoip",
|
||||
"revision": "3f942d1392f6439bda0f67b3c650ce468ebdba8e",
|
||||
"revisionTime": "2018-07-02T11:14:01Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "USkefO0g1U9mr+8hagv3fpSkrxg=",
|
||||
"path": "github.com/aristanetworks/goarista/monotime",
|
||||
|
@ -188,6 +199,12 @@
|
|||
"revision": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6",
|
||||
"revisionTime": "2016-08-13T22:13:03Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "ZxzYc1JwJ3U6kZbw/KGuPko5lSY=",
|
||||
"path": "github.com/howeyc/fsnotify",
|
||||
"revision": "f0c08ee9c60704c1879025f2ae0ff3e000082c13",
|
||||
"revisionTime": "2015-10-03T19:46:02Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "f55gR+6YClh0i/FOhdy66SOUiwY=",
|
||||
"path": "github.com/huin/goupnp",
|
||||
|
@ -352,6 +369,12 @@
|
|||
"revision": "bd9c3193394760d98b2fa6ebb2291f0cd1d06a7d",
|
||||
"revisionTime": "2018-06-06T20:41:48Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "a1WxG0wMDGFnjojQghwu1i1SDhk=",
|
||||
"path": "github.com/oschwald/maxminddb-golang",
|
||||
"revision": "277d39ecb83edd90f26a1fb450ab7e710faa203f",
|
||||
"revisionTime": "2018-08-19T23:01:43Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Se195FlZ160eaEk/uVx4KdTPSxU=",
|
||||
"path": "github.com/pborman/uuid",
|
||||
|
|
Loading…
Reference in New Issue