Merge branch 'akpm-incoming-1'

* akpm-incoming-1: (176 commits)
  scripts/checkpatch.pl: add check for declaration of pci_device_id
  scripts/checkpatch.pl: add warnings for static char that could be static const char
  checkpatch: version 0.31
  checkpatch: statement/block context analyser should look at sanitised lines
  checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
  checkpatch: clean up structure definition macro handling
  checkpatch: update copyright dates
  checkpatch: Add additional attribute #defines
  checkpatch: check for incorrect permissions
  checkpatch: ensure kconfig help checks only apply when we are adding help
  checkpatch: simplify and consolidate "missing space after" checks
  checkpatch: add check for space after struct, union, and enum
  checkpatch: returning errno typically should be negative
  checkpatch: handle casts better fixing false categorisation of : as binary
  checkpatch: ensure we do not collapse bracketed sections into constants
  checkpatch: suggest cleanpatch and cleanfile when appropriate
  checkpatch: types may sit on a line on their own
  checkpatch: fix regressions in "fix handling of leading spaces"
  div64_u64(): improve precision on 32bit platforms
  lib/parser: cleanup match_number()
  ...
Linus Torvalds 2010-10-26 17:15:20 -07:00
commit 31453a9764
209 changed files with 7273 additions and 1744 deletions


@@ -374,13 +374,13 @@ Swap: 0 kB
 KernelPageSize: 4 kB
 MMUPageSize: 4 kB
 
 The first of these lines shows the same information as is displayed for the
-mapping in /proc/PID/maps. The remaining lines show the size of the mapping,
-the amount of the mapping that is currently resident in RAM, the "proportional
-set size" (divide each shared page by the number of processes sharing it), the
-number of clean and dirty shared pages in the mapping, and the number of clean
-and dirty private pages in the mapping. The "Referenced" indicates the amount
-of memory currently marked as referenced or accessed.
+mapping in /proc/PID/maps. The remaining lines show the size of the mapping
+(size), the amount of the mapping that is currently resident in RAM (RSS), the
+process' proportional share of this mapping (PSS), the number of clean and
+dirty shared pages in the mapping, and the number of clean and dirty private
+pages in the mapping. The "Referenced" indicates the amount of memory
+currently marked as referenced or accessed.
 
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.


@@ -0,0 +1,111 @@
Kernel driver apds990x
======================
Supported chips:
Avago APDS990X
Data sheet:
Not freely available
Author:
Samu Onkalo <samu.p.onkalo@nokia.com>
Description
-----------
APDS990x is a combined ambient light and proximity sensor. The ALS and
proximity functions are closely coupled: the ALS measurement path must be
running while the proximity function is enabled.

The ALS produces raw measurement values for two channels: a clear channel
(infrared + visible light) and an IR-only channel. However, threshold
comparisons happen using the clear channel only. The lux value and the
threshold level on the HW may differ considerably depending on the spectrum
of the light source. The driver performs the necessary conversions in both
directions so that the user deals only in lux values: the lux value is
calculated using information from both channels, and the HW threshold level
is calculated from the given lux value to match the current type of lighting.
Occasionally the inaccuracy of these estimates leads to a spurious interrupt,
but that does no harm.

The ALS has four different gain steps. The driver automatically selects a
suitable gain step; after each measurement the reliability of the result is
estimated, and a new measurement is triggered if necessary. Platform data can
provide tuned values for the conversion formulas if such values are known;
otherwise plain sensor default values are used.

The proximity side is a little simpler: there is no need for complex
conversions, and it produces directly usable values.

The driver controls the chip's operational state using the pm_runtime
framework, and the voltage regulators are controlled based on that
operational state.
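For a concrete feel of the interface described below, here is a minimal
userspace sketch in C that reads the current lux value through sysfs. The
device path is an assumption for illustration only; the real path depends on
which I2C bus and address the sensor sits on:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical sysfs location; adjust to the actual i2c topology. */
	#define LUX_ATTR "/sys/bus/i2c/devices/2-0039/lux0_input"

	int main(void)
	{
		FILE *f = fopen(LUX_ATTR, "r");
		unsigned int lux;

		if (f == NULL) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		if (fscanf(f, "%u", &lux) == 1)
			printf("ambient light: %u lux\n", lux);
		fclose(f);
		return EXIT_SUCCESS;
	}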
SYSFS
-----
chip_id
RO - shows detected chip type and version
power_state
RW - enable / disable chip. Uses counting logic
1 enables the chip
0 disables the chip
lux0_input
RO - measured lux value
sysfs_notify called when threshold interrupt occurs
lux0_sensor_range
RO - lux0_input maximum value. In practice the value is never reached,
since the sensor tends to saturate well before that. The real maximum
varies depending on the light spectrum etc.
lux0_rate
RW - measurement rate in Hz
lux0_rate_avail
RO - supported measurement rates
lux0_calibscale
RW - calibration value. Set to the neutral value by default.
Output results are multiplied by the calibscale / calibscale_default
ratio.
lux0_calibscale_default
RO - neutral calibration value
lux0_thresh_above_value
RW - HI level threshold value. Any result above this value
triggers an interrupt. 65535 (i.e. sensor_range) disables the above
interrupt.
lux0_thresh_below_value
RW - LO level threshold value. Any result below this value
triggers an interrupt. 0 disables the below interrupt.
prox0_raw
RO - measured proximity value
sysfs_notify called when threshold interrupt occurs
prox0_sensor_range
RO - prox0_raw max value (1023)
prox0_raw_en
RW - enable / disable proximity - uses counting logic
1 enables the proximity
0 disables the proximity
prox0_reporting_mode
RW - trigger / periodic. In "trigger" mode the driver reports only two
possible values: 0 or the prox0_sensor_range value. 0 means no proximity,
1023 means proximity. This causes a minimal number of interrupts.
In "periodic" mode the driver reports all values above
prox0_thresh_above. This causes more interrupts, but it can give a
_rough_ estimate of the distance.
prox0_reporting_mode_avail
RO - accepted values to prox0_reporting_mode (trigger, periodic)
prox0_thresh_above_value
RW - threshold level which triggers proximity events.
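Because sysfs_notify is called on lux0_input and prox0_raw when a threshold
interrupt occurs, userspace can sleep in poll() on the attribute instead of
re-reading the value in a loop. A minimal sketch, again assuming a
hypothetical device path (note that sysfs requires a read before the
notification is armed):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path; depends on the i2c topology. */
		int fd = open("/sys/bus/i2c/devices/2-0039/lux0_input", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
		char buf[16];
		ssize_t n;

		if (fd < 0)
			return 1;
		read(fd, buf, sizeof(buf));	/* arm the notification */
		if (poll(&pfd, 1, -1) > 0) {	/* sleeps until sysfs_notify() */
			lseek(fd, 0, SEEK_SET);
			n = read(fd, buf, sizeof(buf) - 1);
			if (n > 0) {
				buf[n] = '\0';
				printf("threshold event, lux0_input = %s", buf);
			}
		}
		close(fd);
		return 0;
	}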


@@ -0,0 +1,116 @@
Kernel driver bh1770glc
=======================
Supported chips:
ROHM BH1770GLC
OSRAM SFH7770
Data sheet:
Not freely available
Author:
Samu Onkalo <samu.p.onkalo@nokia.com>
Description
-----------
BH1770GLC and SFH7770 are combined ambient light and proximity sensors.
The ALS and proximity parts operate on their own, but they share a common
I2C interface and interrupt logic. In principle they can run independently,
but the ALS results are used to estimate the reliability of the proximity
sensor.
The ALS produces 16-bit lux values. The chip contains interrupt logic to
produce low- and high-threshold interrupts.
The proximity part contains an IR LED driver for up to three IR LEDs. The
chip measures the amount of reflected IR light and produces a proximity
result with 8-bit resolution. The driver supports only one channel, and it
uses the ALS results to estimate the reliability of the proximity results;
thus the ALS is always running while proximity detection is needed.
The driver uses threshold interrupts to avoid the need to poll the values.
A proximity-low interrupt does not exist in the chip, so it is simulated
with delayed work: as long as above-threshold proximity interrupts keep
arriving, the delayed work is pushed forward. When the proximity level drops
below the threshold value there is no interrupt, so the delayed work finally
runs; this is handled as a "no proximity" indication (see the sketch below).
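The "pushed forward" behaviour is the standard delayed-work pattern. Here is
a simplified kernel-style sketch of the idea; the structure and names
(prox_chip, prox_off_work, PROX_TIMEOUT_MS) are illustrative, not the
driver's actual ones:

	struct prox_chip {
		struct device *dev;
		struct delayed_work prox_off_work;
	};

	static void prox_off_work_fn(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct prox_chip *chip =
			container_of(dwork, struct prox_chip, prox_off_work);

		/* No above-threshold interrupt for a whole period: treat it
		 * as "proximity off" and wake anyone polling prox0_raw. */
		sysfs_notify(&chip->dev->kobj, NULL, "prox0_raw");
	}

	static irqreturn_t prox_threshold_irq(int irq, void *data)
	{
		struct prox_chip *chip = data;

		/* Each above-threshold interrupt pushes the timeout forward,
		 * so the work only runs once the interrupts stop coming. */
		cancel_delayed_work(&chip->prox_off_work);
		schedule_delayed_work(&chip->prox_off_work,
				      msecs_to_jiffies(PROX_TIMEOUT_MS));
		return IRQ_HANDLED;
	}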
Chip state is controlled via the runtime pm framework when it is enabled in
the config.
A calibscale factor is used to hide differences between individual chips. By
default it is set to the neutral state, meaning a factor of 1.00. To get
proper values, a calibrated light source is needed as a reference; the
calibscale factor is set so that the measurement produces roughly the
expected lux value.
SYSFS
-----
chip_id
RO - shows detected chip type and version
power_state
RW - enable / disable chip. Uses counting logic
1 enables the chip
0 disables the chip
lux0_input
RO - measured lux value
sysfs_notify called when threshold interrupt occurs
lux0_sensor_range
RO - lux0_input max value
lux0_rate
RW - measurement rate in Hz
lux0_rate_avail
RO - supported measurement rates
lux0_thresh_above_value
RW - HI level threshold value. Any result above this value
triggers an interrupt. 65535 (i.e. sensor_range) disables the above
interrupt.
lux0_thresh_below_value
RW - LO level threshold value. Any result below this value
triggers an interrupt. 0 disables the below interrupt.
lux0_calibscale
RW - calibration value. Set to the neutral value by default.
Output results are multiplied by the calibscale / calibscale_default
ratio.
lux0_calibscale_default
RO - neutral calibration value
prox0_raw
RO - measured proximity value
sysfs_notify called when threshold interrupt occurs
prox0_sensor_range
RO - prox0_raw max value
prox0_raw_en
RW - enable / disable proximity - uses counting logic
1 enables the proximity
0 disables the proximity
prox0_thresh_above_count
RW - number of proximity interrupts needed before triggering the event
prox0_rate_above
RW - Measurement rate (in Hz) when the level is above threshold
i.e. when proximity on has been reported.
prox0_rate_below
RW - Measurement rate (in Hz) when the level is below threshold
i.e. when proximity off has been reported.
prox0_rate_avail
RO - Supported proximity measurement rates in Hz
prox0_thresh_above0_value
RW - threshold level which triggers proximity events.
Filtered by the persistence filter (prox0_thresh_above_count)
prox0_thresh_above1_value
RW - threshold level which triggers an event immediately


@@ -97,6 +97,33 @@ hpet_open_close(int argc, const char **argv)
 void
 hpet_info(int argc, const char **argv)
 {
+	struct hpet_info info;
+	int fd;
+
+	if (argc != 1) {
+		fprintf(stderr, "hpet_info: device-name\n");
+		return;
+	}
+
+	fd = open(argv[0], O_RDONLY);
+	if (fd < 0) {
+		fprintf(stderr, "hpet_info: open of %s failed\n", argv[0]);
+		return;
+	}
+
+	if (ioctl(fd, HPET_INFO, &info) < 0) {
+		fprintf(stderr, "hpet_info: failed to get info\n");
+		goto out;
+	}
+
+	fprintf(stderr, "hpet_info: hi_irqfreq 0x%lx hi_flags 0x%lx ",
+		info.hi_ireqfreq, info.hi_flags);
+	fprintf(stderr, "hi_hpet %d hi_timer %d\n",
+		info.hi_hpet, info.hi_timer);
+
+out:
+	close(fd);
+	return;
 }
 
 void


@@ -46,7 +46,7 @@ use constant HIGH_KSWAPD_LATENCY => 20;
 use constant HIGH_KSWAPD_REWAKEUP => 21;
 use constant HIGH_NR_SCANNED => 22;
 use constant HIGH_NR_TAKEN => 23;
-use constant HIGH_NR_RECLAIM => 24;
+use constant HIGH_NR_RECLAIMED => 24;
 use constant HIGH_NR_CONTIG_DIRTY => 25;
 
 my %perprocesspid;
@@ -58,11 +58,13 @@ my $opt_read_procstat;
 my $total_wakeup_kswapd;
 my ($total_direct_reclaim, $total_direct_nr_scanned);
 my ($total_direct_latency, $total_kswapd_latency);
+my ($total_direct_nr_reclaimed);
 my ($total_direct_writepage_file_sync, $total_direct_writepage_file_async);
 my ($total_direct_writepage_anon_sync, $total_direct_writepage_anon_async);
 my ($total_kswapd_nr_scanned, $total_kswapd_wake);
 my ($total_kswapd_writepage_file_sync, $total_kswapd_writepage_file_async);
 my ($total_kswapd_writepage_anon_sync, $total_kswapd_writepage_anon_async);
+my ($total_kswapd_nr_reclaimed);
 
 # Catch sigint and exit on request
 my $sigint_report = 0;
@@ -104,7 +106,7 @@ my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
 my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
-my $regex_lru_shrink_inactive_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*)';
+my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -203,8 +205,8 @@ $regex_lru_shrink_inactive = generate_traceevent_regex(
 			"vmscan/mm_vmscan_lru_shrink_inactive",
 			$regex_lru_shrink_inactive_default,
 			"nid", "zid",
-			"lru",
-			"nr_scanned", "nr_reclaimed", "priority");
+			"nr_scanned", "nr_reclaimed", "priority",
+			"flags");
 $regex_lru_shrink_active = generate_traceevent_regex(
 			"vmscan/mm_vmscan_lru_shrink_active",
 			$regex_lru_shrink_active_default,
@@ -375,6 +377,16 @@ EVENT_PROCESS:
 			my $nr_contig_dirty = $7;
 			$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
 			$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
+		} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
+			$details = $5;
+			if ($details !~ /$regex_lru_shrink_inactive/o) {
+				print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n";
+				print "         $details\n";
+				print "         $regex_lru_shrink_inactive/o\n";
+				next;
+			}
+			my $nr_reclaimed = $4;
+			$perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed;
 		} elsif ($tracepoint eq "mm_vmscan_writepage") {
 			$details = $5;
 			if ($details !~ /$regex_writepage/o) {
@@ -464,8 +476,8 @@ sub dump_stats {
 
 	# Print out process activity
 	printf("\n");
-	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s\n", "Process", "Direct", "Wokeup", "Pages", "Pages", "Pages", "Time");
-	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s\n", "details", "Rclms", "Kswapd", "Scanned", "Sync-IO", "ASync-IO", "Stalled");
+	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s\n", "Process", "Direct", "Wokeup", "Pages", "Pages", "Pages", "Pages", "Time");
+	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s\n", "details", "Rclms", "Kswapd", "Scanned", "Rclmed", "Sync-IO", "ASync-IO", "Stalled");
 	foreach $process_pid (keys %stats) {
 
 		if (!$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}) {
@@ -475,6 +487,7 @@ sub dump_stats {
 		$total_direct_reclaim += $stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN};
 		$total_wakeup_kswapd += $stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
 		$total_direct_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+		$total_direct_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
 		$total_direct_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
 		$total_direct_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
 		$total_direct_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
@@ -489,11 +502,12 @@ sub dump_stats {
 			$index++;
 		}
 
-		printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8u %8.3f",
+		printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8u %8u %8.3f",
 			$process_pid,
 			$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN},
 			$stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD},
 			$stats{$process_pid}->{HIGH_NR_SCANNED},
+			$stats{$process_pid}->{HIGH_NR_RECLAIMED},
 			$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
 			$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC},
 			$this_reclaim_delay / 1000);
@@ -529,8 +543,8 @@ sub dump_stats {
 
 	# Print out kswapd activity
 	printf("\n");
-	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Kswapd", "Kswapd", "Order", "Pages", "Pages", "Pages");
-	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Sync-IO", "ASync-IO");
+	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Kswapd", "Kswapd", "Order", "Pages", "Pages", "Pages", "Pages");
+	printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Rclmed", "Sync-IO", "ASync-IO");
 	foreach $process_pid (keys %stats) {
 
 		if (!$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE}) {
@@ -539,16 +553,18 @@ sub dump_stats {
 
 		$total_kswapd_wake += $stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE};
 		$total_kswapd_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+		$total_kswapd_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
 		$total_kswapd_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
 		$total_kswapd_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
 		$total_kswapd_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
 		$total_kswapd_writepage_anon_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC};
 
-		printf("%-" . $max_strlen . "s %8d %10d %8u %8i %8u",
+		printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8i %8u",
 			$process_pid,
 			$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE},
 			$stats{$process_pid}->{HIGH_KSWAPD_REWAKEUP},
 			$stats{$process_pid}->{HIGH_NR_SCANNED},
+			$stats{$process_pid}->{HIGH_NR_RECLAIMED},
 			$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
 			$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC});
@@ -579,6 +595,7 @@ sub dump_stats {
 	print "\nSummary\n";
 	print "Direct reclaims: $total_direct_reclaim\n";
 	print "Direct reclaim pages scanned: $total_direct_nr_scanned\n";
+	print "Direct reclaim pages reclaimed: $total_direct_nr_reclaimed\n";
 	print "Direct reclaim write file sync I/O: $total_direct_writepage_file_sync\n";
 	print "Direct reclaim write anon sync I/O: $total_direct_writepage_anon_sync\n";
 	print "Direct reclaim write file async I/O: $total_direct_writepage_file_async\n";
@@ -588,6 +605,7 @@ sub dump_stats {
 	print "\n";
 	print "Kswapd wakeups: $total_kswapd_wake\n";
 	print "Kswapd pages scanned: $total_kswapd_nr_scanned\n";
+	print "Kswapd pages reclaimed: $total_kswapd_nr_reclaimed\n";
 	print "Kswapd reclaim write file sync I/O: $total_kswapd_writepage_file_sync\n";
 	print "Kswapd reclaim write anon sync I/O: $total_kswapd_writepage_anon_sync\n";
 	print "Kswapd reclaim write file async I/O: $total_kswapd_writepage_file_async\n";
@@ -612,6 +630,7 @@ sub aggregate_perprocesspid() {
 		$perprocess{$process}->{MM_VMSCAN_WAKEUP_KSWAPD} += $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
 		$perprocess{$process}->{HIGH_KSWAPD_REWAKEUP} += $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP};
 		$perprocess{$process}->{HIGH_NR_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_SCANNED};
+		$perprocess{$process}->{HIGH_NR_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED};
 		$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
 		$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
 		$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};


@@ -0,0 +1,162 @@
====================
HIGH MEMORY HANDLING
====================
By: Peter Zijlstra <a.p.zijlstra@chello.nl>
Contents:
(*) What is high memory?
(*) Temporary virtual mappings.
(*) Using kmap_atomic.
(*) Cost of temporary mappings.
(*) i386 PAE.
====================
WHAT IS HIGH MEMORY?
====================
High memory (highmem) is used when the size of physical memory approaches or
exceeds the maximum size of virtual memory. At that point it becomes
impossible for the kernel to keep all of the available physical memory mapped
at all times. This means the kernel needs to start using temporary mappings of
the pieces of physical memory that it wants to access.
The part of (physical) memory not covered by a permanent mapping is what we
refer to as 'highmem'. There are various architecture dependent constraints on
where exactly that border lies.
In the i386 arch, for example, we choose to map the kernel into every process's
VM space so that we don't have to pay the full TLB invalidation costs for
kernel entry/exit. This means the available virtual memory space (4GiB on
i386) has to be divided between user and kernel space.
The traditional split for architectures using this approach is 3:1, 3GiB for
userspace and the top 1GiB for kernel space:
+--------+ 0xffffffff
| Kernel |
+--------+ 0xc0000000
| |
| User |
| |
+--------+ 0x00000000
This means that the kernel can at most map 1GiB of physical memory at any one
time, but because we need virtual address space for other things - including
temporary maps to access the rest of the physical memory - the actual direct
map will typically be less (usually around ~896MiB).
Other architectures that have mm context tagged TLBs can have separate kernel
and user maps. Some hardware (like some ARMs), however, has limited virtual
space when mm context tags are used.
==========================
TEMPORARY VIRTUAL MAPPINGS
==========================
The kernel contains several ways of creating temporary mappings:
(*) vmap(). This can be used to make a long duration mapping of multiple
physical pages into a contiguous virtual space. It needs global
synchronization to unmap.
(*) kmap(). This permits a short duration mapping of a single page. It needs
global synchronization, but is amortized somewhat. It is also prone to
deadlocks when used in a nested fashion, and so it is not recommended for
new code.
(*) kmap_atomic(). This permits a very short duration mapping of a single
page. Since the mapping is restricted to the CPU that issued it, it
performs well, but the issuing task is therefore required to stay on that
CPU until it has finished, lest some other task displace its mappings.
kmap_atomic() may also be used by interrupt contexts, since it does not
sleep; the caller likewise may not sleep until after kunmap_atomic() is
called. It may be assumed that k[un]map_atomic() won't fail.
=================
USING KMAP_ATOMIC
=================
When and where to use kmap_atomic() is straightforward. It is used when code
wants to access the contents of a page that might be allocated from high memory
(see __GFP_HIGHMEM), for example a page in the pagecache. The API has two
functions, and they can be used in a manner similar to the following:
/* Find the page of interest. */
struct page *page = find_get_page(mapping, offset);
/* Gain access to the contents of that page. */
void *vaddr = kmap_atomic(page);
/* Do something to the contents of that page. */
memset(vaddr, 0, PAGE_SIZE);
/* Unmap that page. */
kunmap_atomic(vaddr);
Note that the kunmap_atomic() call takes the result of the kmap_atomic()
call, not the argument.
If you need to map two pages because you want to copy from one page to
another, you need to keep the kmap_atomic calls strictly nested, like:
vaddr1 = kmap_atomic(page1);
vaddr2 = kmap_atomic(page2);
memcpy(vaddr1, vaddr2, PAGE_SIZE);
kunmap_atomic(vaddr2);
kunmap_atomic(vaddr1);
==========================
COST OF TEMPORARY MAPPINGS
==========================
The cost of creating temporary mappings can be quite high. The arch has to
manipulate the kernel's page tables, the data TLB and/or the MMU's registers.
If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping
simply with a bit of arithmetic that will convert the page struct address into
a pointer to the page contents rather than juggling mappings about. In such a
case, the unmap operation may be a null operation.
If CONFIG_MMU is not set, then there can be no temporary mappings and no
highmem. In such a case, the arithmetic approach will also be used.
========
i386 PAE
========
The i386 arch, under some circumstances, will permit you to stick up to 64GiB
of RAM into your 32-bit machine. This has a number of consequences:
(*) Linux needs a page-frame structure for each page in the system and the
pageframes need to live in the permanent mapping, which means:
(*) you can have 896M/sizeof(struct page) page-frames at most; with struct
page being 32 bytes that would end up being something on the order of 112G
worth of pages (the arithmetic is worked out after this list); the kernel,
however, needs to store more than just page-frames in that memory...
(*) PAE makes your page tables larger - which slows the system down, as more
data has to be accessed to traverse them during TLB fills and the like. One
advantage is that PAE has more PTE bits and can provide advanced features
like NX and PAT.
The general recommendation is that you don't use more than 8GiB on a 32-bit
machine - although more might work for you and your workload, you're pretty
much on your own - don't expect kernel developers to really care much if things
come apart.


@@ -657,7 +657,7 @@ ARM/FARADAY FA526 PORT
 M:	Hans Ulli Kroll <ulli.kroll@googlemail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-T:	git://git.berlios.de/gemini-board
+T:	git git://git.berlios.de/gemini-board
 F:	arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -672,7 +672,7 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:	Sascha Hauer <kernel@pengutronix.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-T:	git://git.pengutronix.de/git/imx/linux-2.6.git
+T:	git git://git.pengutronix.de/git/imx/linux-2.6.git
 F:	arch/arm/mach-mx*/
 F:	arch/arm/plat-mxc/
@@ -710,8 +710,7 @@ ARM/INCOME PXA270 SUPPORT
 M:	Marek Vasut <marek.vasut@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-F:	arch/arm/mach-pxa/income.c
-F:	arch/arm/mach-pxa/include/mach-pxa/income.h
+F:	arch/arm/mach-pxa/colibri-pxa270-income.c
 
 ARM/INTEL IOP32X ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -758,13 +757,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-ixp4xx/
 
-ARM/INTEL RESEARCH IMOTE 2 MACHINE SUPPORT
-M:	Jonathan Cameron <jic23@cam.ac.uk>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
-F:	arch/arm/mach-pxa/imote2.c
-
-ARM/INTEL RESEARCH STARGATE 2 MACHINE SUPPORT
+ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
 M:	Jonathan Cameron <jic23@cam.ac.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -929,40 +922,20 @@ W: http://www.fluff.org/ben/linux/
 S:	Maintained
 F:	arch/arm/mach-s3c2410/
 
-ARM/S3C2440 ARM ARCHITECTURE
+ARM/S3C244x ARM ARCHITECTURE
 M:	Ben Dooks <ben-linux@fluff.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.fluff.org/ben/linux/
 S:	Maintained
 F:	arch/arm/mach-s3c2440/
-
-ARM/S3C2442 ARM ARCHITECTURE
-M:	Ben Dooks <ben-linux@fluff.org>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.fluff.org/ben/linux/
-S:	Maintained
-F:	arch/arm/mach-s3c2442/
-
-ARM/S3C2443 ARM ARCHITECTURE
-M:	Ben Dooks <ben-linux@fluff.org>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.fluff.org/ben/linux/
-S:	Maintained
 F:	arch/arm/mach-s3c2443/
 
-ARM/S3C6400 ARM ARCHITECTURE
+ARM/S3C64xx ARM ARCHITECTURE
 M:	Ben Dooks <ben-linux@fluff.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.fluff.org/ben/linux/
 S:	Maintained
-F:	arch/arm/mach-s3c6400/
-
-ARM/S3C6410 ARM ARCHITECTURE
-M:	Ben Dooks <ben-linux@fluff.org>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.fluff.org/ben/linux/
-S:	Maintained
-F:	arch/arm/mach-s3c6410/
+F:	arch/arm/mach-s3c64xx/
 
 ARM/S5P ARM ARCHITECTURES
 M:	Kukjin Kim <kgene.kim@samsung.com>
@@ -3867,7 +3840,7 @@ F: drivers/net/wireless/mwl8k.c
 MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
 M:	Nicolas Pitre <nico@fluxnic.net>
 S:	Odd Fixes
-F:  drivers/mmc/host/mvsdio.*
+F:	drivers/mmc/host/mvsdio.*
 
 MARVELL YUKON / SYSKONNECT DRIVER
 M:	Mirko Lindner <mlindner@syskonnect.de>
@@ -4958,7 +4931,7 @@ RCUTORTURE MODULE
 M:	Josh Triplett <josh@freedesktop.org>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:	Supported
-T:	git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:	Documentation/RCU/torture.txt
 F:	kernel/rcutorture.c
@@ -4983,7 +4956,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:	http://www.rdrop.com/users/paulmck/rclock/
 S:	Supported
-T:	git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:	Documentation/RCU/
 F:	include/linux/rcu*
 F:	include/linux/srcu*
@@ -6141,13 +6114,6 @@ L: linux-usb@vger.kernel.org
 S:	Maintained
 F:	drivers/usb/serial/option.c
 
-USB OV511 DRIVER
-M:	Mark McClelland <mmcclell@bigfoot.com>
-L:	linux-usb@vger.kernel.org
-W:	http://alpha.dyndns.org/ov511/
-S:	Maintained
-F:	drivers/media/video/ov511.*
-
 USB PEGASUS DRIVER
 M:	Petko Manolov <petkan@users.sourceforge.net>
 L:	linux-usb@vger.kernel.org
@@ -6308,16 +6274,6 @@ S: Supported
 F:	drivers/usb/host/xhci*
 F:	drivers/usb/host/pci-quirks*
 
-USB ZC0301 DRIVER
-M:	Luca Risolia <luca.risolia@studio.unibo.it>
-L:	linux-usb@vger.kernel.org
-L:	linux-media@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
-W:	http://www.linux-projects.org
-S:	Maintained
-F:	Documentation/video4linux/zc0301.txt
-F:	drivers/media/video/zc0301/
-
 USB ZD1201 DRIVER
 L:	linux-wireless@vger.kernel.org
 W:	http://linux-lc100020.sourceforge.net


@@ -55,6 +55,9 @@ config ZONE_DMA
 	bool
 	default y
 
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool y
+
 config NEED_DMA_MAP_STATE
 	def_bool y


@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
 #define vip	volatile int __force *
 #define vuip	volatile unsigned int __force *
 
-#ifdef MCPCIA_ONE_HAE_WINDOW
+#ifndef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_FROB_MMIO	\
 	if (__mcpcia_is_mmio(hose)) {	\
 		set_hae(hose & 0xffffffff);	\


@@ -1,6 +1,9 @@
 #ifndef __ALPHA_T2__H__
 #define __ALPHA_T2__H__
 
+/* Fit everything into one 128MB HAE window. */
+#define T2_ONE_HAE_WINDOW 1
+
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/compiler.h>
@@ -19,7 +22,7 @@
  *
  */
 
-#define T2_MEM_R1_MASK 0x07ffffff  /* Mem sparse region 1 mask is 26 bits */
+#define T2_MEM_R1_MASK 0x07ffffff  /* Mem sparse region 1 mask is 27 bits */
 
 /* GAMMA-SABLE is a SABLE with EV5-based CPUs */
 /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
@@ -85,7 +88,9 @@
 #define T2_DIR	(IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
 #define T2_ICE	(IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
 
+#ifndef T2_ONE_HAE_WINDOW
 #define T2_HAE_ADDRESS	T2_HAE_1
+#endif
 
 /* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
    3.8fff.ffff
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr)
  *
  */
 
+#ifdef T2_ONE_HAE_WINDOW
+#define t2_set_hae
+#else
 #define t2_set_hae { \
-	msb = addr >> 27; \
+	unsigned long msb = addr >> 27; \
 	addr &= T2_MEM_R1_MASK; \
 	set_hae(msb); \
 }
-
-extern raw_spinlock_t t2_hae_lock;
+#endif
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock;
 __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long result, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long result;
 
 	t2_set_hae;
 
 	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extbl(result, addr & 3);
 }
 
 __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long result, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long result;
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extwl(result, addr & 3);
 }
 
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long result, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long result;
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return result & 0xffffffffUL;
 }
 
 __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long r0, r1, work, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long r0, r1, work;
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	r0 = *(vuip)(work);
 	r1 = *(vuip)(work + (4 << 5));
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return r1 << 32 | r0;
 }
 
 __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb, w;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long w;
 
 	t2_set_hae;
 
 	w = __kernel_insbl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb, w;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long w;
 
 	t2_set_hae;
 
 	w = __kernel_inswl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 /*
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb, work;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long work;
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	*(vuip)work = b;
 	*(vuip)(work + (4 << 5)) = b >> 32;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)


@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 }
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
 #define pte_unmap(pte)	do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 extern pgd_t swapper_pg_dir[1024];


@@ -74,8 +74,6 @@
 # define DBG(args)
 #endif
 
-DEFINE_RAW_SPINLOCK(t2_hae_lock);
-
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
@@ -406,6 +404,7 @@ void __init
 t2_init_arch(void)
 {
 	struct pci_controller *hose;
+	struct resource *hae_mem;
 	unsigned long temp;
 	unsigned int i;
@@ -433,7 +432,13 @@ t2_init_arch(void)
 	 */
 	pci_isa_hose = hose = alloc_pci_controller();
 	hose->io_space = &ioport_resource;
-	hose->mem_space = &iomem_resource;
+	hae_mem = alloc_resource();
+	hae_mem->start = 0;
+	hae_mem->end = T2_MEM_R1_MASK;
+	hae_mem->name = pci_hae0_name;
+	if (request_resource(&iomem_resource, hae_mem) < 0)
+		printk(KERN_ERR "Failed to request HAE_MEM\n");
+	hose->mem_space = hae_mem;
 	hose->index = 0;
 
 	hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;


@@ -25,6 +25,9 @@
 #ifdef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_HAE_ADDRESS	(&alpha_mv.hae_cache)
 #endif
+#ifdef T2_ONE_HAE_WINDOW
+#define T2_HAE_ADDRESS	(&alpha_mv.hae_cache)
+#endif
 
 /* Only a few systems don't define IACK_SC, handling all interrupts through
    the SRM console.  But splitting out that one case from IO() below


@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif


@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
 #define pte_page(pte)	(pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
 
-#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte)	__pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	__pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte)	__pte_unmap(pte)
 
 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km)	do { } while (0)
+#define __pte_map(dir)	pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte)	do { } while (0)
 #else
-#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir)	((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
 #endif
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)


@@ -358,8 +358,7 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
 	int i, found = 0, __div = 0, __pdiv = 0;
 
 	/* Don't exceed the maximum rate */
-	max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
-		       clk_xtali.rate / 4);
+	max_rate = max3(clk_pll1.rate / 4, clk_pll2.rate / 4, clk_xtali.rate / 4);
 	rate = min(rate, max_rate);
 
 	/*


@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * open-code the spin-locking.
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
-	pte = pte_offset_map_nested(pmd, address);
+	pte = pte_offset_map(pmd, address);
 	spin_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
 	spin_unlock(ptl);
-	pte_unmap_nested(pte);
+	pte_unmap(pte);
 
 	return ret;
 }


@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
+	int type;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (kmap)
 		return kmap;
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;
 
 	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	}
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM


@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		goto no_pte;
 
 	init_pmd = pmd_offset(init_pgd, 0);
-	init_pte = pte_offset_map_nested(init_pmd, 0);
+	init_pte = pte_offset_map(init_pmd, 0);
 	set_pte_ext(new_pte, *init_pte, 0);
-	pte_unmap_nested(init_pte);
+	pte_unmap(init_pte);
 	pte_unmap(new_pte);
 }


@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte)	do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,


@@ -15,14 +15,6 @@
 #define LFLUSH_I_AND_D	0x00000808
 #define LSIGTRAP	5
 
-/* process bits for task_struct.flags */
-#define PF_TRACESYS_OFF	3
-#define PF_TRACESYS_BIT	5
-#define PF_PTRACED_OFF	3
-#define PF_PTRACED_BIT	4
-#define PF_DTRACE_OFF	1
-#define PF_DTRACE_BIT	5
-
 /*
  * NOTE! The single-stepping code assumes that all interrupt handlers
  * start by saving SYSCFG on the stack with their first instruction.


@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #define pte_pfn(x)	((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))


@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	(void *) damlr; \
 })
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
 	pagefault_disable();
-	debug_kmap_atomic(type);
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	case 1:		return __kmap_atomic_primary(1, paddr, 3);
 	case 2:		return __kmap_atomic_primary(2, paddr, 4);
 	case 3:		return __kmap_atomic_primary(3, paddr, 5);
-	case 4:		return __kmap_atomic_primary(4, paddr, 6);
-	case 5:		return __kmap_atomic_primary(5, paddr, 7);
-	case 6:		return __kmap_atomic_primary(6, paddr, 8);
-	case 7:		return __kmap_atomic_primary(7, paddr, 9);
-	case 8:		return __kmap_atomic_primary(8, paddr, 10);
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		return __kmap_atomic_secondary(type - 9, paddr);
 
 	default:
 		BUG();
@@ -152,22 +143,13 @@
 	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
 } while(0)
 
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
 	switch (type) {
 	case 0:		__kunmap_atomic_primary(0, 2);	break;
 	case 1:		__kunmap_atomic_primary(1, 3);	break;
 	case 2:		__kunmap_atomic_primary(2, 4);	break;
 	case 3:		__kunmap_atomic_primary(3, 5);	break;
-	case 4:		__kunmap_atomic_primary(4, 6);	break;
-	case 5:		__kunmap_atomic_primary(5, 7);	break;
-	case 6:		__kunmap_atomic_primary(6, 8);	break;
-	case 7:		__kunmap_atomic_primary(7, 9);	break;
-	case 8:		__kunmap_atomic_primary(8, 10);	break;
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		__kunmap_atomic_secondary(type - 9, kvaddr);
-		break;
 
 	default:
 		BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */

@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 #endif
 
 /*

@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	dampr2 = __get_DAMPR(2);
 
 	for (i = 0; i < nents; i++) {
-		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
 
 		frv_dcache_writeback((unsigned long) vaddr,
 				     (unsigned long) vaddr + PAGE_SIZE);
 	}
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
 		__set_IAMPR(2, dampr2);

@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
 	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
 	frv_cache_wback_inv(start, start + len);
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);

@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 {
 	return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(4, paddr, 6);
+	case 1:		return __kmap_atomic_primary(5, paddr, 7);
+	case 2:		return __kmap_atomic_primary(6, paddr, 8);
+	case 3:		return __kmap_atomic_primary(7, paddr, 9);
+	case 4:		return __kmap_atomic_primary(8, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx_pop();
+
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(4, 6);	break;
+	case 1:		__kunmap_atomic_primary(5, 7);	break;
+	case 2:		__kunmap_atomic_primary(6, 8);	break;
+	case 3:		__kunmap_atomic_primary(7, 9);	break;
+	case 4:		__kunmap_atomic_primary(8, 10);	break;
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
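
A recurring pattern in the hunks above and below: __kmap_atomic() no longer takes a caller-chosen km_type slot, it pops the next free slot from a per-CPU stack via kmap_atomic_idx_push() and hands it back through kmap_atomic_idx_pop(). A minimal userspace sketch of that stack discipline follows; the slot count and the assert()-based checks are illustrative stand-ins, not the kernel's implementation.

/*
 * Not kernel code: a userspace sketch of the stack discipline behind
 * kmap_atomic_idx_push()/kmap_atomic_idx_pop(). KM_TYPE_NR here is a
 * made-up bound, not the real per-arch value.
 */
#include <assert.h>
#include <stdio.h>

#define KM_TYPE_NR 8

static int kmap_idx;		/* per-CPU in the kernel; one global here */

static int kmap_atomic_idx_push(void)
{
	int idx = kmap_idx++;
	assert(idx < KM_TYPE_NR);	/* nesting deeper than the slot table is a bug */
	return idx;
}

static int kmap_atomic_idx_pop(void)
{
	int idx = --kmap_idx;
	assert(idx >= 0);		/* unbalanced kunmap_atomic() */
	return idx;
}

int main(void)
{
	/* Nested "mappings" must be released in LIFO order. */
	int outer = kmap_atomic_idx_push();
	int inner = kmap_atomic_idx_push();

	printf("outer slot %d, inner slot %d\n", outer, inner);

	kmap_atomic_idx_pop();		/* releases inner */
	kmap_atomic_idx_pop();		/* releases outer */
	return 0;
}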

@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
 
 /* atomic versions of the some PTE manipulations: */

@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)	\
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 2) & 0x1f)

@@ -50,14 +50,6 @@
 
 LFLUSH_I_AND_D = 0x00000808
 
-/* process bits for task_struct.ptrace */
-PT_TRACESYS_OFF = 3
-PT_TRACESYS_BIT = 1
-PT_PTRACED_OFF = 3
-PT_PTRACED_BIT = 0
-PT_DTRACE_OFF = 3
-PT_DTRACE_BIT = 2
-
 #define SAVE_ALL_INT save_all_int
 #define SAVE_ALL_SYS save_all_sys
 #define RESTORE_ALL restore_all

@@ -32,16 +32,6 @@
 
 #ifdef __ASSEMBLY__
 
-/* process bits for task_struct.flags */
-PF_TRACESYS_OFF = 3
-PF_TRACESYS_BIT = 5
-PF_PTRACED_OFF = 3
-PF_PTRACED_BIT = 4
-PF_DTRACE_OFF = 1
-PF_DTRACE_BIT = 5
-
-LENOSYS = 38
-
 #define SWITCH_STACK_SIZE (6*4+4)	/* Includes return address */
 
 /*

@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
 }
 
 #define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
 #define pte_unmap(pte)		((void)0)
-#define pte_unmap_nested(pte)	((void)0)
 
 /*
  * Allocate and free page tables. The xxx_kernel() versions are

@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
 /* FIXME: should we bother with kmap() here? */
 #define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
 #define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
 
 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x)		((x).val & 0x7F)

@@ -504,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
 
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte)		kunmap_atomic(pte)
 
 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS	29

@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap			__kmap
-#define kunmap			__kunmap
-#define kmap_atomic		__kmap_atomic
-#define kunmap_atomic_notypecheck	__kunmap_atomic_notypecheck
-#define kmap_atomic_to_page	__kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	flush_cache_all()

@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #define pte_offset_map(dir, address)                                    \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)                             \
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)                                    \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)                             \
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Initialize a new pgd / pmd table with invalid pointers.

@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
 #endif
-
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
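
Hedged usage sketch (kernel context assumed, not part of this merge): what a caller of the converted API looks like. copy_one_page() is a made-up helper; the point is that there is no km_type argument left to get wrong, and nested maps must be released in LIFO order while page faults stay disabled.

/*
 * Kernel-context sketch only: copying one highmem page with the
 * post-conversion one-argument kmap_atomic()/kunmap_atomic().
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_one_page(struct page *dst, struct page *src)
{
	char *s = kmap_atomic(src);	/* disables pagefaults; must not sleep */
	char *d = kmap_atomic(dst);	/* nests: inner slot popped first */

	memcpy(d, s, PAGE_SIZE);

	kunmap_atomic(d);		/* LIFO order */
	kunmap_atomic(s);
}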

@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
+	pagefault_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
@@ -91,26 +92,35 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
 	return vaddr;
 }
 
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+	int type;
 
-	if (vaddr < FIXADDR_START) /* FIXME */
+	if (vaddr < FIXADDR_START) { /* FIXME */
+		pagefault_enable();
 		return;
+	}
 
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-		BUG();
+	type = kmap_atomic_idx_pop();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(kmap_pte - idx);
-	__flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+	{
+		unsigned int idx;
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+			BUG();
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(kmap_pte - idx);
+		__flush_tlb_one(vaddr);
+	}
 #endif
+
+	pagefault_enable();
 }
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */

@@ -457,9 +457,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
 
 #define pte_offset_map(dir, address) \
 	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte)		do {} while (0)
-#define pte_unmap_nested(pte)	do {} while (0)
 
 /*
  * The MN10300 has external MMU info in the form of a TLB: this is adapted from

@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(pmd, address) \
 	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 #define pte_unmap(pte)			do { } while (0)
 #define pte_unmap_nested(pte)		do { } while (0)

@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-			      pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap(struct page *page)
 {
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)

@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)		kunmap_atomic(pte)
 
 /*
  * Encode and decode a swap entry.

@@ -193,9 +193,7 @@
   (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)			do { } while(0)
-#define pte_unmap_nested(pte)		do { } while(0)
 
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */

@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
 	 * memory in this pool does not change.
 	 */
 	if (spare_needed && reserve_freed) {
-		tmp = min(spare_needed, min(reserve_freed,
-					    (viodev->cmo.entitled -
-					     VIO_CMO_MIN_ENT)));
+		tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
 
 		vio_cmo.spare += tmp;
 		viodev->cmo.entitled -= tmp;
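
The hunk above folds a nested min() into the new min3() helper introduced in this series. A userspace sketch of the equivalence follows; the plain macros here stand in for the kernel's type-checked, single-evaluation versions.

/*
 * Userspace sketch of the min3() rewrite: min3(a, b, c) is just
 * min(a, min(b, c)). These naive macros double-evaluate their
 * arguments; the kernel's versions do not.
 */
#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))
#define min3(a, b, c)	min((a), min((b), (c)))

int main(void)
{
	long spare_needed = 40, reserve_freed = 25, entitled_slack = 30;

	/* Old form: min(spare_needed, min(reserve_freed, entitled_slack)) */
	long tmp = min3(spare_needed, reserve_freed, entitled_slack);

	printf("tmp = %ld\n", tmp);	/* prints 25 */
	return 0;
}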

@@ -29,17 +29,17 @@
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,33 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned int idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_page(NULL, vaddr);
+	}
 #endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
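
Every kmap_atomic_prot() variant in this merge computes its mapping address the same way: each CPU owns a window of KM_TYPE_NR fixmap slots, and a slot index maps to a virtual address counted down from the top of the fixmap area. A userspace sketch of that arithmetic follows; all constants are made-up stand-ins for the real per-arch values.

/*
 * Userspace sketch of idx = type + KM_TYPE_NR * smp_processor_id() and
 * __fix_to_virt(FIX_KMAP_BEGIN + idx). Constants are illustrative only.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffff000UL
#define FIX_KMAP_BEGIN	8UL
#define KM_TYPE_NR	8

/* fixmap slots grow downward from FIXADDR_TOP, one page per slot */
static unsigned long __fix_to_virt(unsigned long idx)
{
	return FIXADDR_TOP - (idx << PAGE_SHIFT);
}

int main(void)
{
	int cpu, type;

	/* each CPU gets a disjoint window, so no cross-CPU collisions */
	for (cpu = 0; cpu < 2; cpu++)
		for (type = 0; type < 3; type++) {
			int idx = type + KM_TYPE_NR * cpu;
			printf("cpu %d slot %d -> %#lx\n",
			       cpu, type, __fix_to_virt(FIX_KMAP_BEGIN + idx));
		}
	return 0;
}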

@@ -1094,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /*
  * 31 bit swap entry format:

@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define pte_offset_map(dir, address)	\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,

@@ -429,10 +429,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 #ifdef CONFIG_X2TLB
 #define pte_ERROR(e) \

@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 #ifndef __ASSEMBLY__
 #define IOBASE_VADDR	0xff000000

@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
 #define flush_cache_kmaps()	flush_cache_all()

@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
  * and sun4c is guaranteed to have no highmem anyway.
  */
 #define pte_offset_map(d, a)		pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a)	pte_offset_kernel(d,a)
-
 #define pte_unmap(pte)		do{}while(0)
-#define pte_unmap_nested(pte)	do{}while(0)
 
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following

@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
 	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 #define pte_offset_kernel		pte_index
 #define pte_offset_map			pte_index
-#define pte_offset_map_nested		pte_index
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
 
 /* Actual page table PTE updates.  */
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);

@@ -29,17 +29,17 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	unsigned long idx;
 	unsigned long vaddr;
+	long idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
@@ -63,44 +63,50 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+	type = kmap_atomic_idx_pop();
 
-	/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned long idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+		/* XXX Fix - Anton */
 #if 0
-	__flush_cache_one(vaddr);
+		__flush_cache_one(vaddr);
 #else
-	flush_cache_all();
+		flush_cache_all();
 #endif
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
 
-	/* XXX Fix - Anton */
+		/* XXX Fix - Anton */
 #if 0
-	__flush_tlb_one(vaddr);
+		__flush_tlb_one(vaddr);
 #else
-	flush_tlb_all();
+		flush_tlb_all();
 #endif
+	}
 #endif
 
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)

@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);
 
 #define flush_cache_kmaps()	do { } while (0)

@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
 #define pte_offset_map(dir, address) \
 	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE1)
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */

@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
  * When holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
-	return kmap_atomic_prot(page, type, PAGE_NONE);
+	return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it.  Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it.  Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
 */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)

@@ -120,6 +120,9 @@ config SMP
 
 	  If you don't know what to do, say N.
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	def_bool y
+
 config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
@@ -147,3 +150,6 @@ config KERNEL_STACK_ORDER
 	  This option determines the size of UML kernel stacks.  They will
 	  be 1 << order pages.  The default is OK unless you're running Valgrind
 	  on UML, in which case, set this to 3.
+
+config NO_DMA
+	def_bool y

@@ -566,7 +566,6 @@ CONFIG_CRC32=m
 # CONFIG_CRC7 is not set
 # CONFIG_LIBCRC32C is not set
 CONFIG_PLIST=y
-CONFIG_HAS_DMA=y
 
 #
 # SCSI device support

@@ -1,112 +0,0 @@
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <asm/scatterlist.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return(0);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return(0);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG();
-	return((void *) 0);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG();
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	BUG();
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
-{
-	BUG();
-	return 0;
-}
-
-#endif

@@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

@@ -8,23 +8,38 @@ extern int set_signals(int enable);
 extern void block_signals(void);
 extern void unblock_signals(void);
 
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
-				     (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
-				      set_signals(flags); } while(0)
+static inline unsigned long arch_local_save_flags(void)
+{
+	return get_signals();
+}
 
-#define local_irq_save(flags) do { local_save_flags(flags); \
-                                   local_irq_disable(); } while(0)
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	set_signals(flags);
+}
 
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
+static inline void arch_local_irq_enable(void)
+{
+	unblock_signals();
+}
 
-#define irqs_disabled()                 \
-({                                      \
-        unsigned long flags;            \
-        local_save_flags(flags);        \
-        (flags == 0);                   \
-})
+static inline void arch_local_irq_disable(void)
+{
+	block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	flags = arch_local_save_flags();
+	arch_local_irq_disable();
+	return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_local_save_flags() == 0;
+}
 
 extern void *_switch_to(void *prev, void *next, void *last);
 #define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
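
The conversion above replaces UML's flag macros with the arch_local_* inline functions that the generic irqflags layer builds local_irq_save()/local_irq_restore() from. A userspace sketch of the contract they must satisfy (nesting-safe save/restore) follows, modelling get_signals()/block_signals() with a single int; all values are fake.

/*
 * Userspace sketch: save/disable/restore must nest correctly even when
 * interrupts (here, "signals") are already disabled.
 */
#include <assert.h>
#include <stdio.h>

static int signals_enabled = 1;

static unsigned long arch_local_save_flags(void) { return signals_enabled; }
static void arch_local_irq_restore(unsigned long f) { signals_enabled = f; }
static void arch_local_irq_disable(void) { signals_enabled = 0; }

static unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

int main(void)
{
	unsigned long a = arch_local_irq_save();	/* outer critical section */
	unsigned long b = arch_local_irq_save();	/* nested: saves "disabled" */

	arch_local_irq_restore(b);			/* still disabled */
	assert(!signals_enabled);
	arch_local_irq_restore(a);			/* back to enabled */
	assert(signals_enabled);

	printf("nesting preserved\n");
	return 0;
}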

@@ -50,8 +50,18 @@ SECTIONS
   .rela.got       : { *(.rela.got) }
   .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
   .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
-  .rel.plt        : { *(.rel.plt) }
-  .rela.plt       : { *(.rela.plt) }
+  .rel.plt : {
+	*(.rel.plt)
+	PROVIDE_HIDDEN(__rel_iplt_start = .);
+	*(.rel.iplt)
+	PROVIDE_HIDDEN(__rel_iplt_end = .);
+  }
+  .rela.plt : {
+	*(.rela.plt)
+	PROVIDE_HIDDEN(__rela_iplt_start = .);
+	*(.rela.iplt)
+	PROVIDE_HIDDEN(__rela_iplt_end = .);
+  }
   .init           : {
     KEEP (*(.init))
   } =0x90909090
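
Background for the __rel{,a}_iplt_{start,end} symbols added here and in uml.lds.S below: a statically linked glibc applies IRELATIVE relocations for STT_GNU_IFUNC symbols at startup by walking exactly the section these symbols delimit. A toy resolver that exercises the mechanism on an ELF target with gcc (this demo program is mine, not part of the merge; build with gcc -static ifunc_demo.c):

/* ifunc_demo.c: minimal STT_GNU_IFUNC example (gcc/glibc assumed) */
#include <stdio.h>

static int add_generic(int a, int b) { return a + b; }

/* the resolver runs at startup, via the .rela.iplt entries kept above */
static int (*resolve_add(void))(int, int)
{
	return add_generic;	/* a real resolver would pick a CPU-specific variant */
}

int add(int, int) __attribute__((ifunc("resolve_add")));

int main(void)
{
	printf("%d\n", add(2, 3));
	return 0;
}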

@@ -334,7 +334,7 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
 	irq_enter();
-	__do_IRQ(irq);
+	generic_handle_irq(irq);
 	irq_exit();
 	set_irq_regs(old_regs);
 	return 1;
@@ -391,17 +391,10 @@ void __init init_IRQ(void)
 {
 	int i;
 
-	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
-	irq_desc[TIMER_IRQ].action = NULL;
-	irq_desc[TIMER_IRQ].depth = 1;
-	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
-	enable_irq(TIMER_IRQ);
+	set_irq_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+
 	for (i = 1; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &normal_irq_type;
-		enable_irq(i);
+		set_irq_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
 	}
 }
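
The init_IRQ() rewrite above moves UML from poking irq_desc fields by hand to the generic chip/flow-handler model dispatched by generic_handle_irq(). A userspace sketch of that split follows; the structures and names are simplified stand-ins for the kernel's, not its actual layout.

/*
 * Userspace sketch: an irq chip describes the hardware, a flow handler
 * describes how an event is acknowledged and dispatched.
 */
#include <stdio.h>

#define NR_IRQS 4

struct irq_chip { const char *name; };

struct irq_desc {
	struct irq_chip *chip;
	void (*handle)(int irq, struct irq_desc *desc);
};

static struct irq_desc irq_desc[NR_IRQS];

static void handle_edge_irq(int irq, struct irq_desc *desc)
{
	printf("irq %d via %s (edge flow)\n", irq, desc->chip->name);
}

static void set_irq_chip_and_handler(int irq, struct irq_chip *chip,
				     void (*h)(int, struct irq_desc *))
{
	irq_desc[irq].chip = chip;
	irq_desc[irq].handle = h;
}

static void generic_handle_irq(int irq)
{
	irq_desc[irq].handle(irq, &irq_desc[irq]);
}

int main(void)
{
	static struct irq_chip timer_chip = { "SIGVTALRM" };

	set_irq_chip_and_handler(0, &timer_chip, handle_edge_irq);
	generic_handle_irq(0);
	return 0;
}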

@@ -22,7 +22,7 @@ SECTIONS
   _text = .;
   _stext = .;
   __init_begin = .;
-  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_TEXT_SECTION(0)
   . = ALIGN(PAGE_SIZE);
 
   .text      :
@@ -43,6 +43,23 @@ SECTIONS
 	__syscall_stub_end = .;
   }
 
+  /*
+   * These are needed even in a static link, even if they wind up being empty.
+   * Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
+   */
+  .rel.plt : {
+	*(.rel.plt)
+	PROVIDE_HIDDEN(__rel_iplt_start = .);
+	*(.rel.iplt)
+	PROVIDE_HIDDEN(__rel_iplt_end = .);
+  }
+  .rela.plt : {
+	*(.rela.plt)
+	PROVIDE_HIDDEN(__rela_iplt_start = .);
+	*(.rela.iplt)
+	PROVIDE_HIDDEN(__rela_iplt_end = .);
+  }
+
   #include "asm/common.lds.S"
 
   init.data : { INIT_DATA }

@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 long long disable_timer(void)
 {
 	struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
-	int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+	long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
 
 	if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
 		printk(UM_KERN_ERR "disable_timer - setitimer failed, "
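
The int to long long change above matters because nanosecond counts stop fitting in 32 bits after roughly 2.147 seconds. A small sketch of the truncation being avoided, assuming UM_NSEC_PER_SEC is 10^9 as in the UML headers:

/* Userspace sketch of 32-bit truncation of a nanosecond count. */
#include <stdio.h>

#define UM_NSEC_PER_SEC 1000000000LL

int main(void)
{
	long long ns = 3 * UM_NSEC_PER_SEC;	/* a 3 second timer remainder */
	int truncated = (int)ns;		/* what the old 'int remain' kept */

	printf("full: %lld ns, as int: %d ns\n", ns, truncated);
	return 0;
}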

@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	do { } while (0)

@@ -27,10 +27,10 @@
 #include <asm/tlbflush.h>
 
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);

@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 #endif
 
 #if defined(CONFIG_HIGHPTE)
-#define __KM_PTE			\
-	(in_nmi() ? KM_NMI_PTE : 	\
-	 in_irq() ? KM_IRQ_PTE :	\
-	 KM_PTE0)
 #define pte_offset_map(dir, address)					\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +		\
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) +			\
 	 pte_index((address)))
-#define pte_offset_map_nested(dir, address)				\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +		\
-	 pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
 #else
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a kernel PTE and flush it from the TLB */
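
Hedged kernel-context sketch (not part of this patch): a PTE lookup as it reads after the conversion. Under CONFIG_HIGHPTE the page-table page may live in highmem, so pte_offset_map() kmaps it and pte_unmap() must pair with it; with stack-based slots, two walks can nest without reserving KM_PTE0/KM_PTE1 or the __KM_PTE context dance removed above. pte_is_present() is a made-up helper.

/* Kernel-context sketch only. */
#include <linux/mm.h>
#include <linux/highmem.h>

static int pte_is_present(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);	/* kmap_atomic under HIGHPTE */
	int ret = pte_present(*pte);

	pte_unmap(pte);				/* kunmap_atomic pair */
	return ret;
}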

@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)

@@ -327,6 +327,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
 	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)

@@ -49,7 +49,6 @@ static unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long offset, addr = (unsigned long)from;
-	int type = in_nmi() ? KM_NMI : KM_IRQ0;
 	unsigned long size, len = 0;
 	struct page *page;
 	void *map;
@@ -63,9 +62,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 		offset = addr & (PAGE_SIZE - 1);
 		size = min(PAGE_SIZE - offset, n - len);
 
-		map = kmap_atomic(page, type);
+		map = kmap_atomic(page);
 		memcpy(to, map+offset, size);
-		kunmap_atomic(map, type);
+		kunmap_atomic(map);
 		put_page(page);
 
 		len  += size;

@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!is_crashed_pfn_valid(pfn))
 		return -EFAULT;
 
-	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+	vaddr = kmap_atomic_pfn(pfn);
 
 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);

@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);

@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 
-	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);

@@ -919,9 +919,9 @@ spurious_fault(unsigned long error_code, unsigned long address)
 int show_unhandled_signals = 1;
 
 static inline int
-access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
-    if (write) {
+    if (error_code & PF_WRITE) {
         /* write, present and write, not present: */
         if (unlikely(!(vma->vm_flags & VM_WRITE)))
             return 1;

@@ -956,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
     struct task_struct *tsk;
     unsigned long address;
     struct mm_struct *mm;
-    int write;
     int fault;
+    int write = error_code & PF_WRITE;
+    unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+                         (write ? FAULT_FLAG_WRITE : 0);
 
     tsk = current;
     mm = tsk->mm;

@@ -1068,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
             bad_area_nosemaphore(regs, error_code, address);
             return;
         }
+retry:
         down_read(&mm->mmap_sem);
     } else {
         /*

@@ -1111,9 +1114,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
      * we can handle it..
      */
 good_area:
-    write = error_code & PF_WRITE;
-    if (unlikely(access_error(error_code, write, vma))) {
+    if (unlikely(access_error(error_code, vma))) {
         bad_area_access_error(regs, error_code, address);
         return;
     }

@@ -1123,21 +1124,34 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
      * make sure we exit gracefully rather than endlessly redo
      * the fault:
      */
-    fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+    fault = handle_mm_fault(mm, vma, address, flags);
 
     if (unlikely(fault & VM_FAULT_ERROR)) {
         mm_fault_error(regs, error_code, address, fault);
         return;
     }
 
-    if (fault & VM_FAULT_MAJOR) {
-        tsk->maj_flt++;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                      regs, address);
-    } else {
-        tsk->min_flt++;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                      regs, address);
+    /*
+     * Major/minor page fault accounting is only done on the
+     * initial attempt. If we go through a retry, it is extremely
+     * likely that the page will be found in page cache at that point.
+     */
+    if (flags & FAULT_FLAG_ALLOW_RETRY) {
+        if (fault & VM_FAULT_MAJOR) {
+            tsk->maj_flt++;
+            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                          regs, address);
+        } else {
+            tsk->min_flt++;
+            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                          regs, address);
+        }
+        if (fault & VM_FAULT_RETRY) {
+            /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+             * of starvation. */
+            flags &= ~FAULT_FLAG_ALLOW_RETRY;
+            goto retry;
+        }
     }
 
     check_v8086_mode(regs, address, tsk);
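
The new retry path above is the core of the change: the first attempt carries
FAULT_FLAG_ALLOW_RETRY, and if handle_mm_fault() returns VM_FAULT_RETRY (it
drops mmap_sem itself before sleeping on page I/O), the fault is redone
exactly once without the flag. A condensed sketch of that control flow,
assuming a hypothetical wrapper; a real handler must re-find and re-validate
the vma after retaking mmap_sem, which is why fault.c places its retry label
before the lookup:

    #include <linux/mm.h>

    static int fault_with_retry(struct mm_struct *mm, unsigned long address,
                                int write)
    {
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
                             (write ? FAULT_FLAG_WRITE : 0);
        struct vm_area_struct *vma;
        int fault;

    retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        /* ... validate vma and access rights here ... */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (fault & VM_FAULT_RETRY) {
            /* mmap_sem was already dropped by handle_mm_fault();
             * clearing the flag bounds us to a single retry. */
            flags &= ~FAULT_FLAG_ALLOW_RETRY;
            goto retry;
        }
        up_read(&mm->mmap_sem);
        return fault;
    }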


@@ -9,6 +9,7 @@ void *kmap(struct page *page)
         return page_address(page);
     return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
         return;
     kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-    enum fixed_addresses idx;
     unsigned long vaddr;
+    int idx, type;
 
     /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
     pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
     if (!PageHighMem(page))
         return page_address(page);
 
-    debug_kmap_atomic(type);
-
+    type = kmap_atomic_idx_push();
     idx = type + KM_TYPE_NR*smp_processor_id();
     vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
     BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
     return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-    return kmap_atomic_prot(page, type, kmap_prot);
-}
-
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
-{
-    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-    enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-    /*
-     * Force other mappings to Oops if they'll try to access this pte
-     * without first remap it. Keeping stale mappings around is a bad idea
-     * also, in case the page changes cacheability attributes or becomes
-     * a protected page in a hypervisor.
-     */
-    if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
-        kpte_clear_flush(kmap_pte-idx, vaddr);
-    else {
-#ifdef CONFIG_DEBUG_HIGHMEM
-        BUG_ON(vaddr < PAGE_OFFSET);
-        BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
-    }
-
-    pagefault_enable();
+    return kmap_atomic_prot(page, kmap_prot);
 }
+EXPORT_SYMBOL(__kmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-    return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
+    return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
+
+void __kunmap_atomic(void *kvaddr)
+{
+    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+
+    if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+        vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+        int idx, type;
+
+        type = kmap_atomic_idx_pop();
+        idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+        WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+        /*
+         * Force other mappings to Oops if they'll try to access this
+         * pte without first remap it. Keeping stale mappings around
+         * is a bad idea also, in case the page changes cacheability
+         * attributes or becomes a protected page in a hypervisor.
+         */
+        kpte_clear_flush(kmap_pte-idx, vaddr);
+    }
+#ifdef CONFIG_DEBUG_HIGHMEM
+    else {
+        BUG_ON(vaddr < PAGE_OFFSET);
+        BUG_ON(vaddr >= (unsigned long)high_memory);
+    }
+#endif
+
+    pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
     pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
     return pte_page(*pte);
 }
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
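
kmap_atomic_idx_push()/kmap_atomic_idx_pop() above replace the caller-chosen
km_type with a small per-CPU stack of slot indices. The one obligation this
leaves at call sites is strict LIFO release, which is exactly why several
later hunks in this diff reorder their kunmap_atomic() calls. A usage sketch
(not from this commit):

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Nested atomic mappings no longer need coordinated KM_* constants;
     * they only need to be released in reverse order of acquisition. */
    static void copy_highpage_sketch(struct page *dst, struct page *src)
    {
        void *d = kmap_atomic(dst);     /* pushes slot N     */
        void *s = kmap_atomic(src);     /* pushes slot N + 1 */

        memcpy(d, s, PAGE_SIZE);

        kunmap_atomic(s);               /* pops N + 1 first (LIFO) */
        kunmap_atomic(d);               /* then pops N             */
    }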


@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
 
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
     io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-    enum fixed_addresses idx;
     unsigned long vaddr;
+    int idx, type;
 
     pagefault_disable();
 
-    debug_kmap_atomic(type);
+    type = kmap_atomic_idx_push();
     idx = type + KM_TYPE_NR * smp_processor_id();
     vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
     set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 }
 
 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
  */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
     /*
      * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
     if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
         prot = PAGE_KERNEL_UC_MINUS;
 
-    return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+    return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
     unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-    enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-    /*
-     * Force other mappings to Oops if they'll try to access this pte
-     * without first remap it. Keeping stale mappings around is a bad idea
-     * also, in case the page changes cacheability attributes or becomes
-     * a protected page in a hypervisor.
-     */
-    if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+    if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+        vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+        int idx, type;
+
+        type = kmap_atomic_idx_pop();
+        idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+        WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+        /*
+         * Force other mappings to Oops if they'll try to access this
+         * pte without first remap it. Keeping stale mappings around
+         * is a bad idea also, in case the page changes cacheability
+         * attributes or becomes a protected page in a hypervisor.
+         */
         kpte_clear_flush(kmap_pte-idx, vaddr);
+    }
 
     pagefault_enable();
 }


@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define pte_offset_kernel(dir,addr) \
     ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /*


@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
         memcpy(dest_buf, src_buf, len);
 
-        kunmap_atomic(dest_buf, KM_USER0);
         kunmap_atomic(src_buf, KM_USER1);
+        kunmap_atomic(dest_buf, KM_USER0);
 
         async_tx_sync_epilog(submit);
     }


@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
         memcpy(walk->dst.virt.addr, walk->page, n);
         blkcipher_unmap_dst(walk);
     } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
-        blkcipher_unmap_src(walk);
         if (walk->flags & BLKCIPHER_WALK_DIFF)
             blkcipher_unmap_dst(walk);
+        blkcipher_unmap_src(walk);
     }
 
     scatterwalk_advance(&walk->in, n);


@@ -160,6 +160,18 @@ static ssize_t node_read_numastat(struct sys_device * dev,
 }
 static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 
+static ssize_t node_read_vmstat(struct sys_device *dev,
+                struct sysdev_attribute *attr, char *buf)
+{
+    int nid = dev->id;
+    return sprintf(buf,
+        "nr_written %lu\n"
+        "nr_dirtied %lu\n",
+        node_page_state(nid, NR_WRITTEN),
+        node_page_state(nid, NR_DIRTIED));
+}
+static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+
 static ssize_t node_read_distance(struct sys_device * dev,
             struct sysdev_attribute *attr, char * buf)
 {
@@ -243,6 +255,7 @@ int register_node(struct node *node, int num, struct node *parent)
         sysdev_create_file(&node->sysdev, &attr_meminfo);
         sysdev_create_file(&node->sysdev, &attr_numastat);
         sysdev_create_file(&node->sysdev, &attr_distance);
+        sysdev_create_file(&node->sysdev, &attr_vmstat);
 
         scan_unevictable_register_node(node);
 
@@ -267,6 +280,7 @@ void unregister_node(struct node *node)
     sysdev_remove_file(&node->sysdev, &attr_meminfo);
     sysdev_remove_file(&node->sysdev, &attr_numastat);
     sysdev_remove_file(&node->sysdev, &attr_distance);
+    sysdev_remove_file(&node->sysdev, &attr_vmstat);
 
     scan_unevictable_unregister_node(node);
     hugetlb_unregister_node(node); /* no-op, if memoryless node */


@@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd,
     else
         memcpy(raw_buf, loop_buf, size);
 
-    kunmap_atomic(raw_buf, KM_USER0);
     kunmap_atomic(loop_buf, KM_USER1);
+    kunmap_atomic(raw_buf, KM_USER0);
     cond_resched();
     return 0;
 }
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
     for (i = 0; i < size; i++)
         *out++ = *in++ ^ key[(i & 511) % keysize];
 
-    kunmap_atomic(raw_buf, KM_USER0);
     kunmap_atomic(loop_buf, KM_USER1);
+    kunmap_atomic(raw_buf, KM_USER0);
     cond_resched();
     return 0;
 }


@@ -32,12 +32,12 @@
 #include <linux/bitops.h>
 #include <linux/compat.h>
 #include <linux/clocksource.h>
+#include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <asm/current.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 
@@ -81,13 +81,13 @@ static cycle_t read_hpet(struct clocksource *cs)
 }
 
 static struct clocksource clocksource_hpet = {
     .name   = "hpet",
     .rating = 250,
     .read   = read_hpet,
     .mask   = CLOCKSOURCE_MASK(64),
     .mult   = 0, /* to be calculated */
     .shift  = 10,
     .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 static struct clocksource *hpet_clocksource;
 #endif
@@ -465,6 +465,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
     if (irq) {
         unsigned long irq_flags;
 
+        if (devp->hd_flags & HPET_SHARED_IRQ) {
+            /*
+             * To prevent the interrupt handler from seeing an
+             * unwanted interrupt status bit, program the timer
+             * so that it will not fire in the near future ...
+             */
+            writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+                   &timer->hpet_config);
+            write_counter(read_counter(&hpet->hpet_mc),
+                          &timer->hpet_compare);
+            /* ... and clear any left-over status. */
+            isr = 1 << (devp - devp->hd_hpets->hp_dev);
+            writel(isr, &hpet->hpet_isr);
+        }
+
         sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
         irq_flags = devp->hd_flags & HPET_SHARED_IRQ
                         ? IRQF_SHARED : IRQF_DISABLED;
@@ -581,11 +596,10 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
         break;
     case HPET_INFO:
         {
+            memset(info, 0, sizeof(*info));
             if (devp->hd_ireqfreq)
                 info->hi_ireqfreq =
                     hpet_time_div(hpetp, devp->hd_ireqfreq);
-            else
-                info->hi_ireqfreq = 0;
             info->hi_flags =
                 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
             info->hi_hpet = hpetp->hp_which;
@@ -811,7 +825,7 @@ int hpet_alloc(struct hpet_data *hdp)
     struct hpets *hpetp;
     size_t siz;
     struct hpet __iomem *hpet;
-    static struct hpets *last = NULL;
+    static struct hpets *last;
     unsigned long period;
     unsigned long long temp;
     u32 remainder;
@@ -1000,6 +1014,8 @@ static int hpet_acpi_add(struct acpi_device *device)
         return -ENODEV;
 
     if (!data.hd_address || !data.hd_nirqs) {
+        if (data.hd_address)
+            iounmap(data.hd_address);
         printk("%s: no address or irqs in _CRS\n", __func__);
         return -ENODEV;
     }
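
The added memset() in the HPET_INFO hunk is the standard fix for a kernel
stack info-leak: the whole struct is later copied to user space, so any field
the handler does not explicitly set would carry stale stack bytes. A generic
sketch of the pattern (hypothetical struct and handler, not this driver's
code):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct foo_dev { unsigned long freq; };

    struct foo_info {
        unsigned long freq;
        unsigned long flags;    /* would leak stack bytes if left unset */
    };

    static long foo_ioctl_info(struct foo_dev *dev, void __user *arg)
    {
        struct foo_info info;

        memset(&info, 0, sizeof(info)); /* zero the whole struct first */
        info.freq = dev->freq;          /* then fill in the known fields */

        return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
    }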


@@ -1665,6 +1665,17 @@ static int check_hotmod_int_op(const char *curr, const char *option,
     return 0;
 }
 
+static struct smi_info *smi_info_alloc(void)
+{
+    struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+    if (info) {
+        spin_lock_init(&info->si_lock);
+        spin_lock_init(&info->msg_lock);
+    }
+    return info;
+}
+
 static int hotmod_handler(const char *val, struct kernel_param *kp)
 {
     char *str = kstrdup(val, GFP_KERNEL);
@@ -1779,7 +1790,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
         }
 
         if (op == HM_ADD) {
-            info = kzalloc(sizeof(*info), GFP_KERNEL);
+            info = smi_info_alloc();
             if (!info) {
                 rv = -ENOMEM;
                 goto out;
@@ -1844,7 +1855,7 @@ static __devinit void hardcode_find_bmc(void)
         if (!ports[i] && !addrs[i])
             continue;
 
-        info = kzalloc(sizeof(*info), GFP_KERNEL);
+        info = smi_info_alloc();
         if (!info)
             return;
@@ -2027,7 +2038,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
         return -ENODEV;
     }
 
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
+    info = smi_info_alloc();
     if (!info) {
         printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
         return -ENOMEM;
@@ -2137,7 +2148,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
     if (!acpi_dev)
         return -ENODEV;
 
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
+    info = smi_info_alloc();
     if (!info)
         return -ENOMEM;
@@ -2318,7 +2329,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 {
     struct smi_info *info;
 
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
+    info = smi_info_alloc();
     if (!info) {
         printk(KERN_ERR PFX "Could not allocate SI data\n");
         return;
@@ -2425,7 +2436,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
     int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
     struct smi_info *info;
 
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
+    info = smi_info_alloc();
     if (!info)
         return -ENOMEM;
@@ -2566,7 +2577,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
         return -EINVAL;
     }
 
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
+    info = smi_info_alloc();
 
     if (!info) {
         dev_err(&dev->dev,
@@ -3013,7 +3024,7 @@ static __devinit void default_find_bmc(void)
         if (check_legacy_ioport(ipmi_defaults[i].port))
             continue;
 #endif
-        info = kzalloc(sizeof(*info), GFP_KERNEL);
+        info = smi_info_alloc();
         if (!info)
             return;
@@ -3138,9 +3149,6 @@ static int try_smi_init(struct smi_info *new_smi)
         goto out_err;
     }
 
-    spin_lock_init(&(new_smi->si_lock));
-    spin_lock_init(&(new_smi->msg_lock));
-
     /* Do low-level detection first. */
     if (new_smi->handlers->detect(new_smi->si_sm)) {
         if (new_smi->addr_source)


@@ -1467,7 +1467,7 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
         return -EINVAL;
 
     while (size) {
-        copy = min(drest, min(size, dst->length));
+        copy = min3(drest, size, dst->length);
 
         size -= copy;
         drest -= copy;
@@ -1729,7 +1729,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
         return -EINVAL;
 
     while (size) {
-        copy = min(srest, min(dst->length, size));
+        copy = min3(srest, dst->length, size);
 
         daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
         memcpy(daddr + dst->offset + offset, saddr, copy);
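
min3() here is one of the new kernel.h helpers from this merge; it flattens
the old nested form while keeping min()'s pairwise type checking. A minimal
equivalence sketch:

    #include <linux/kernel.h>

    static unsigned int pick_copy_len(unsigned int drest, unsigned int size,
                                      unsigned int dst_len)
    {
        /* Same value as the old form: min(drest, min(size, dst_len)). */
        return min3(drest, size, dst_len);
    }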


@@ -345,7 +345,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
 
     do {
         level = __ffs(pending);
-        handle_nested_irq(level + chip->irq_base);
+        generic_handle_irq(level + chip->irq_base);
 
         pending &= ~(1 << level);
     } while (pending);
@@ -360,7 +360,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
     struct pca953x_platform_data *pdata = client->dev.platform_data;
     int ret;
 
-    if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
+    if (pdata->irq_base != -1
+            && (id->driver_data & PCA953X_INT)) {
         int lvl;
 
         ret = pca953x_read_reg(chip, PCA953X_INPUT,
@@ -383,7 +384,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
             set_irq_chip_data(irq, chip);
             set_irq_chip_and_handler(irq, &pca953x_irq_chip,
                                      handle_edge_irq);
-            set_irq_nested_thread(irq, 1);
 #ifdef CONFIG_ARM
             set_irq_flags(irq, IRQF_VALID);
 #else
@@ -394,6 +394,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
         ret = request_threaded_irq(client->irq,
                                    NULL,
                                    pca953x_irq_handler,
+                                   IRQF_TRIGGER_RISING |
                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                    dev_name(&client->dev), chip);
         if (ret) {
@@ -408,13 +409,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
     return 0;
 
 out_failed:
-    chip->irq_base = 0;
+    chip->irq_base = -1;
     return ret;
 }
 
 static void pca953x_irq_teardown(struct pca953x_chip *chip)
 {
-    if (chip->irq_base)
+    if (chip->irq_base != -1)
         free_irq(chip->client->irq, chip);
 }
 #else /* CONFIG_GPIO_PCA953X_IRQ */
@@ -424,7 +425,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
     struct i2c_client *client = chip->client;
     struct pca953x_platform_data *pdata = client->dev.platform_data;
 
-    if (pdata->irq_base && (id->driver_data & PCA953X_INT))
+    if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
         dev_warn(&client->dev, "interrupt support not compiled in\n");
 
     return 0;
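
The irq_base comparisons above change because 0 is a legitimate IRQ base;
using it as the "no interrupts" sentinel made interrupt support impossible to
request on platforms whose GPIO IRQ range starts at 0, so -1 becomes the
sentinel instead. A sketch of the convention with a hypothetical
platform-data struct:

    #include <linux/types.h>

    /* Hypothetical platform data; irq_base == -1 means "no IRQ support". */
    struct gpio_chip_pdata {
        int irq_base;
    };

    static bool wants_irq(const struct gpio_chip_pdata *pdata)
    {
        return pdata->irq_base != -1;   /* 0 is a valid base */
    }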


@@ -155,11 +155,11 @@ fast_shmem_read(struct page **pages,
     char __iomem *vaddr;
     int unwritten;
 
-    vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+    vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
     if (vaddr == NULL)
         return -ENOMEM;
     unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-    kunmap_atomic(vaddr, KM_USER0);
+    kunmap_atomic(vaddr);
 
     if (unwritten)
         return -EFAULT;
@@ -509,10 +509,10 @@ fast_user_write(struct io_mapping *mapping,
     char *vaddr_atomic;
     unsigned long unwritten;
 
-    vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+    vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
     unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                   user_data, length);
-    io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+    io_mapping_unmap_atomic(vaddr_atomic);
     if (unwritten)
         return -EFAULT;
     return 0;
@@ -551,11 +551,11 @@ fast_shmem_write(struct page **pages,
     char __iomem *vaddr;
     unsigned long unwritten;
 
-    vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+    vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
     if (vaddr == NULL)
         return -ENOMEM;
     unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-    kunmap_atomic(vaddr, KM_USER0);
+    kunmap_atomic(vaddr);
 
     if (unwritten)
         return -EFAULT;
@@ -3346,8 +3346,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
         reloc_offset = obj_priv->gtt_offset + reloc->offset;
         reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                               (reloc_offset &
-                                               ~(PAGE_SIZE - 1)),
-                                              KM_USER0);
+                                               ~(PAGE_SIZE - 1)));
         reloc_entry = (uint32_t __iomem *)(reloc_page +
                                            (reloc_offset & (PAGE_SIZE - 1)));
         reloc_val = target_obj_priv->gtt_offset + reloc->delta;
@@ -3358,7 +3357,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                   readl(reloc_entry), reloc_val);
 #endif
         writel(reloc_val, reloc_entry);
-        io_mapping_unmap_atomic(reloc_page, KM_USER0);
+        io_mapping_unmap_atomic(reloc_page);
 
         /* The updated presumed offset for this entry will be
          * copied back out to the user.
@@ -4772,11 +4771,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
     page_count = obj->size / PAGE_SIZE;
 
     for (i = 0; i < page_count; i++) {
-        char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+        char *dst = kmap_atomic(obj_priv->pages[i]);
         char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
         memcpy(dst, src, PAGE_SIZE);
-        kunmap_atomic(dst, KM_USER0);
+        kunmap_atomic(dst);
     }
     drm_clflush_pages(obj_priv->pages, page_count);
     drm_agp_chipset_flush(dev);
@@ -4833,11 +4832,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
     page_count = obj->size / PAGE_SIZE;
 
     for (i = 0; i < page_count; i++) {
-        char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+        char *src = kmap_atomic(obj_priv->pages[i]);
         char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
         memcpy(dst, src, PAGE_SIZE);
-        kunmap_atomic(src, KM_USER0);
+        kunmap_atomic(src);
     }
 
     i915_gem_object_put_pages(obj);
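
The io_mapping_map_atomic_wc()/io_mapping_unmap_atomic() pairs lose their
KM_* argument in the same way as kmap_atomic(). A minimal sketch of the
updated write path (reduced from the relocation code above; not the driver's
exact code):

    #include <linux/io.h>
    #include <linux/io-mapping.h>

    /* Write one dword into a write-combined io_mapping at byte offset off. */
    static void gtt_write32(struct io_mapping *map, unsigned long off, u32 val)
    {
        void __iomem *page = io_mapping_map_atomic_wc(map, off & PAGE_MASK);

        writel(val, page + (off & ~PAGE_MASK));
        io_mapping_unmap_atomic(page);  /* slot is popped automatically */
    }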


@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,
 
         local_irq_save(flags);
         s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                     reloc_offset,
-                                     KM_IRQ0);
+                                     reloc_offset);
         memcpy_fromio(d, s, PAGE_SIZE);
-        io_mapping_unmap_atomic(s, KM_IRQ0);
+        io_mapping_unmap_atomic(s);
         local_irq_restore(flags);
 
         dst->pages[page] = d;


@@ -187,8 +187,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
     if (OVERLAY_NONPHYSICAL(overlay->dev)) {
         regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                        overlay->reg_bo->gtt_offset,
-                                        KM_USER0);
+                                        overlay->reg_bo->gtt_offset);
 
         if (!regs) {
             DRM_ERROR("failed to map overlay regs in GTT\n");
@@ -203,7 +202,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 {
     if (OVERLAY_NONPHYSICAL(overlay->dev))
-        io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+        io_mapping_unmap_atomic(overlay->virt_addr);
 
     overlay->virt_addr = NULL;


@@ -2167,11 +2167,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
 
     if (off < pci_resource_len(dev->pdev, 1)) {
         uint8_t __iomem *p =
-            io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+            io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
 
         val = ioread32(p + (off & ~PAGE_MASK));
 
-        io_mapping_unmap_atomic(p, KM_USER0);
+        io_mapping_unmap_atomic(p);
     }
 
     return val;
@@ -2183,12 +2183,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
 {
     if (off < pci_resource_len(dev->pdev, 1)) {
         uint8_t __iomem *p =
-            io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+            io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
 
         iowrite32(val, p + (off & ~PAGE_MASK));
         wmb();
 
-        io_mapping_unmap_atomic(p, KM_USER0);
+        io_mapping_unmap_atomic(p);
     }
 }


@@ -170,7 +170,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
     src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
 #ifdef CONFIG_X86
-    dst = kmap_atomic_prot(d, KM_USER0, prot);
+    dst = kmap_atomic_prot(d, prot);
 #else
     if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
         dst = vmap(&d, 1, 0, prot);
@@ -183,7 +183,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
     memcpy_fromio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-    kunmap_atomic(dst, KM_USER0);
+    kunmap_atomic(dst);
 #else
     if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
         vunmap(dst);
@@ -206,7 +206,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
     dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 
 #ifdef CONFIG_X86
-    src = kmap_atomic_prot(s, KM_USER0, prot);
+    src = kmap_atomic_prot(s, prot);
 #else
     if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
         src = vmap(&s, 1, 0, prot);
@@ -219,7 +219,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
     memcpy_toio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-    kunmap_atomic(src, KM_USER0);
+    kunmap_atomic(src);
 #else
     if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
         vunmap(src);


@@ -1409,8 +1409,7 @@ static int __init ipoib_init_module(void)
 
     ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
     ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
-    ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
-                                                 IPOIB_MIN_QUEUE_SIZE));
+    ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
     ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
 #endif


@@ -482,7 +482,7 @@ static s32 pm121_correct(s32 new_setpoint,
     new_min += correction->offset;
     new_min = (new_min >> 16) + min;
 
-    return max(new_setpoint, max(new_min, 0));
+    return max3(new_setpoint, new_min, 0);
 }
 
 static s32 pm121_connect(unsigned int control_id, s32 setpoint)


@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
      * Issue the synchronous I/O from a different thread
      * to avoid generic_make_request recursion.
      */
-    INIT_WORK_ON_STACK(&req.work, do_metadata);
+    INIT_WORK_ONSTACK(&req.work, do_metadata);
     queue_work(ps->metadata_wq, &req.work);
     flush_workqueue(ps->metadata_wq);
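
INIT_WORK_ONSTACK()/INIT_DELAYED_WORK_ONSTACK() (renamed from *_ON_STACK in
this hunk and earlier ones) exist so the debug-objects machinery knows a work
item lives on the stack rather than in heap or static storage. The usual shape
is an on-stack request flushed before the frame unwinds; a condensed sketch,
assuming a caller-provided workqueue:

    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct onstack_req {
        struct work_struct work;
        struct completion done;
    };

    static void do_req(struct work_struct *work)
    {
        struct onstack_req *req =
            container_of(work, struct onstack_req, work);

        /* ... the synchronous part runs in workqueue context ... */
        complete(&req->done);
    }

    static void run_sync(struct workqueue_struct *wq)
    {
        struct onstack_req req;

        INIT_WORK_ONSTACK(&req.work, do_req);   /* on-stack variant */
        init_completion(&req.done);
        queue_work(wq, &req.work);
        wait_for_completion(&req.done);         /* must not return earlier */
    }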


@@ -4,7 +4,6 @@
 
 menuconfig MISC_DEVICES
     bool "Misc devices"
-    default y
     ---help---
       Say Y here to get to see options for device drivers from various
       different categories. This option alone does not add any kernel code.
@@ -24,7 +23,8 @@ config AD525X_DPOT
       AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
       AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
       AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
-      ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
+      ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+      AD5271, AD5272, AD5274
       digital potentiometer chips.
 
       See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -284,6 +284,16 @@ config SGI_GRU_DEBUG
       This option enables additional debugging code for the SGI GRU driver. If
       you are unsure, say N.
 
+config APDS9802ALS
+    tristate "Medfield Avago APDS9802 ALS Sensor module"
+    depends on I2C
+    help
+      If you say yes here you get support for the ALS APDS9802 ambient
+      light sensor.
+
+      This driver can also be built as a module.  If so, the module
+      will be called apds9802als.
+
 config ISL29003
     tristate "Intersil ISL29003 ambient light sensor"
     depends on I2C && SYSFS
@@ -294,6 +304,16 @@ config ISL29003
       This driver can also be built as a module.  If so, the module
       will be called isl29003.
 
+config ISL29020
+    tristate "Intersil ISL29020 ambient light sensor"
+    depends on I2C
+    help
+      If you say yes here you get support for the Intersil ISL29020
+      ambient light sensor.
+
+      This driver can also be built as a module.  If so, the module
+      will be called isl29020.
+
 config SENSORS_TSL2550
     tristate "Taos TSL2550 ambient light sensor"
     depends on I2C && SYSFS
@@ -314,6 +334,27 @@ config SENSORS_BH1780
       This driver can also be built as a module.  If so, the module
       will be called bh1780gli.
 
+config SENSORS_BH1770
+    tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+    depends on I2C
+    ---help---
+      Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+      SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+      To compile this driver as a module, choose M here: the
+      module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+    tristate "APDS990X combined als and proximity sensors"
+    depends on I2C
+    default n
+    ---help---
+      Say Y here if you want to build a driver for Avago APDS990x
+      combined ambient light and proximity sensor chip.
+
+      To compile this driver as a module, choose M here: the
+      module will be called apds990x. If unsure, say N here.
+
 config HMC6352
     tristate "Honeywell HMC6352 compass"
     depends on I2C


@@ -16,6 +16,8 @@ obj-$(CONFIG_TIFM_CORE)        += tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)        += tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)          += phantom.o
 obj-$(CONFIG_SENSORS_BH1780)   += bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770)   += bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)       += kgdbts.o
@@ -23,7 +25,9 @@ obj-$(CONFIG_SGI_XP)           += sgi-xp/
 obj-$(CONFIG_SGI_GRU)          += sgi-gru/
 obj-$(CONFIG_CS5535_MFGPT)     += cs5535-mfgpt.o
 obj-$(CONFIG_HP_ILO)           += hpilo.o
+obj-$(CONFIG_APDS9802ALS)      += apds9802als.o
 obj-$(CONFIG_ISL29003)         += isl29003.o
+obj-$(CONFIG_ISL29020)         += isl29020.o
 obj-$(CONFIG_SENSORS_TSL2550)  += tsl2550.o
 obj-$(CONFIG_EP93XX_PWM)       += ep93xx_pwm.o
 obj-$(CONFIG_DS1682)           += ds1682.o


@@ -102,6 +102,8 @@ static const struct i2c_device_id ad_dpot_id[] = {
     {"ad5170", AD5170_ID},
     {"ad5172", AD5172_ID},
     {"ad5173", AD5173_ID},
+    {"ad5272", AD5272_ID},
+    {"ad5274", AD5274_ID},
     {}
 };
 MODULE_DEVICE_TABLE(i2c, ad_dpot_id);


@@ -38,6 +38,8 @@ static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
     {.name = "ad8402", .devid = AD8402_ID},
     {.name = "ad8403", .devid = AD8403_ID},
     {.name = "adn2850", .devid = ADN2850_ID},
+    {.name = "ad5270", .devid = AD5270_ID},
+    {.name = "ad5271", .devid = AD5271_ID},
     {}
 };
 
@@ -53,13 +55,13 @@ static int write8(void *client, u8 val)
 static int write16(void *client, u8 reg, u8 val)
 {
     u8 data[2] = {reg, val};
-    return spi_write(client, data, 1);
+    return spi_write(client, data, 2);
 }
 
 static int write24(void *client, u8 reg, u16 val)
 {
     u8 data[3] = {reg, val >> 8, val};
-    return spi_write(client, data, 1);
+    return spi_write(client, data, 3);
 }
 
 static int read8(void *client)
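
The write16()/write24() fix above deserves a note: spi_write()'s last
argument is the buffer length in bytes, and both helpers were passing 1, so
only the register/command byte ever reached the part and the data byte(s)
were silently dropped. Corrected shape (a sketch; the driver routes this
through a void *client handle instead):

    #include <linux/spi/spi.h>

    static int write16_fixed(struct spi_device *spi, u8 reg, u8 val)
    {
        u8 data[2] = {reg, val};

        /* The length must cover the whole buffer: 2 bytes, not 1. */
        return spi_write(spi, data, sizeof(data));
    }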


@@ -29,9 +29,9 @@
  * AD5262       2       256     20, 50, 200
  * AD5263       4       256     20, 50, 200
  * AD5290       1       256     10, 50, 100
- * AD5291       1       256     20
- * AD5292       1       1024    20
- * AD5293       1       1024    20
+ * AD5291       1       256     20, 50, 100 (20-TP)
+ * AD5292       1       1024    20, 50, 100 (20-TP)
+ * AD5293       1       1024    20, 50, 100
  * AD7376       1       128     10, 50, 100, 1M
  * AD8400       1       256     1, 10, 50, 100
  * AD8402       2       256     1, 10, 50, 100
@@ -52,6 +52,10 @@
  * AD5170       1       256     2.5, 10, 50, 100 (OTP)
  * AD5172       2       256     2.5, 10, 50, 100 (OTP)
  * AD5173       2       256     2.5, 10, 50, 100 (OTP)
+ * AD5270       1       1024    20, 50, 100 (50-TP)
+ * AD5271       1       256     20, 50, 100 (50-TP)
+ * AD5272       1       1024    20, 50, 100 (50-TP)
+ * AD5274       1       256     20, 50, 100 (50-TP)
  *
  * See Documentation/misc-devices/ad525x_dpot.txt for more info.
  *
@@ -126,18 +130,38 @@ static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
 static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 {
     unsigned ctrl = 0;
+    int value;
 
     if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
 
         if (dpot->feat & F_RDACS_WONLY)
             return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
+
         if (dpot->uid == DPOT_UID(AD5291_ID) ||
             dpot->uid == DPOT_UID(AD5292_ID) ||
-            dpot->uid == DPOT_UID(AD5293_ID))
-            return dpot_read_r8d8(dpot,
-                DPOT_AD5291_READ_RDAC << 2);
+            dpot->uid == DPOT_UID(AD5293_ID)) {
+
+            value = dpot_read_r8d8(dpot,
+                DPOT_AD5291_READ_RDAC << 2);
+
+            if (dpot->uid == DPOT_UID(AD5291_ID))
+                value = value >> 2;
+
+            return value;
+        } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+                   dpot->uid == DPOT_UID(AD5271_ID)) {
+
+            value = dpot_read_r8d8(dpot,
+                DPOT_AD5270_1_2_4_READ_RDAC << 2);
+
+            if (value < 0)
+                return value;
+
+            if (dpot->uid == DPOT_UID(AD5271_ID))
+                value = value >> 2;
+
+            return value;
+        }
 
         ctrl = DPOT_SPI_READ_RDAC;
     } else if (reg & DPOT_ADDR_EEPROM) {
         ctrl = DPOT_SPI_READ_EEPROM;
@@ -153,6 +177,7 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 
 static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
 {
+    int value;
     unsigned ctrl = 0;
 
     switch (dpot->uid) {
     case DPOT_UID(AD5246_ID):
@@ -166,7 +191,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
     case DPOT_UID(AD5280_ID):
     case DPOT_UID(AD5282_ID):
         ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-            0 : DPOT_AD5291_RDAC_AB;
+            0 : DPOT_AD5282_RDAC_AB;
         return dpot_read_r8d8(dpot, ctrl);
     case DPOT_UID(AD5170_ID):
     case DPOT_UID(AD5171_ID):
@@ -175,8 +200,27 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
     case DPOT_UID(AD5172_ID):
     case DPOT_UID(AD5173_ID):
         ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-            0 : DPOT_AD5272_3_A0;
+            0 : DPOT_AD5172_3_A0;
         return dpot_read_r8d8(dpot, ctrl);
+    case DPOT_UID(AD5272_ID):
+    case DPOT_UID(AD5274_ID):
+        dpot_write_r8d8(dpot,
+                (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
+
+        value = dpot_read_r8d16(dpot,
+                    DPOT_AD5270_1_2_4_RDAC << 2);
+
+        if (value < 0)
+            return value;
+        /*
+         * AD5272/AD5274 returns high byte first, however
+         * underlying smbus expects low byte first.
+         */
+        value = swab16(value);
+
+        if (dpot->uid == DPOT_UID(AD5271_ID))
+            value = value >> 2;
+        return value;
     default:
         if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
             return dpot_read_r8d16(dpot, (reg & 0xF8) |
@@ -198,7 +242,7 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
 {
     unsigned val = 0;
 
-    if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+    if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
         if (dpot->feat & F_RDACS_WONLY)
             dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
 
@@ -219,11 +263,30 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
         } else {
             if (dpot->uid == DPOT_UID(AD5291_ID) ||
                 dpot->uid == DPOT_UID(AD5292_ID) ||
-                dpot->uid == DPOT_UID(AD5293_ID))
+                dpot->uid == DPOT_UID(AD5293_ID)) {
+
+                dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
+                        DPOT_AD5291_UNLOCK_CMD);
+
+                if (dpot->uid == DPOT_UID(AD5291_ID))
+                    value = value << 2;
+
                 return dpot_write_r8d8(dpot,
                     (DPOT_AD5291_RDAC << 2) |
                     (value >> 8), value & 0xFF);
+            } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+                       dpot->uid == DPOT_UID(AD5271_ID)) {
+
+                dpot_write_r8d8(dpot,
+                        DPOT_AD5270_1_2_4_CTRLREG << 2,
+                        DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+                if (dpot->uid == DPOT_UID(AD5271_ID))
+                    value = value << 2;
+
+                return dpot_write_r8d8(dpot,
+                    (DPOT_AD5270_1_2_4_RDAC << 2) |
+                    (value >> 8), value & 0xFF);
+            }
 
             val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
         }
     } else if (reg & DPOT_ADDR_EEPROM) {
@@ -243,6 +306,16 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
             val = DPOT_SPI_INC_ALL;
             break;
         }
+    } else if (reg & DPOT_ADDR_OTP) {
+        if (dpot->uid == DPOT_UID(AD5291_ID) ||
+            dpot->uid == DPOT_UID(AD5292_ID)) {
+            return dpot_write_r8d8(dpot,
+                DPOT_AD5291_STORE_XTPM << 2, 0);
+        } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+                   dpot->uid == DPOT_UID(AD5271_ID)) {
+            return dpot_write_r8d8(dpot,
+                DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+        }
     } else
         BUG();
 
@@ -273,7 +346,7 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
     case DPOT_UID(AD5280_ID):
     case DPOT_UID(AD5282_ID):
         ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-            0 : DPOT_AD5291_RDAC_AB;
+            0 : DPOT_AD5282_RDAC_AB;
         return dpot_write_r8d8(dpot, ctrl, value);
         break;
     case DPOT_UID(AD5171_ID):
@@ -289,12 +362,12 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
     case DPOT_UID(AD5172_ID):
     case DPOT_UID(AD5173_ID):
         ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-            0 : DPOT_AD5272_3_A0;
+            0 : DPOT_AD5172_3_A0;
         if (reg & DPOT_ADDR_OTP) {
             tmp = dpot_read_r8d16(dpot, ctrl);
             if (tmp >> 14) /* Ready to Program? */
                 return -EFAULT;
-            ctrl |= DPOT_AD5270_2_3_FUSE;
+            ctrl |= DPOT_AD5170_2_3_FUSE;
         }
         return dpot_write_r8d8(dpot, ctrl, value);
         break;
@@ -303,10 +376,25 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
             tmp = dpot_read_r8d16(dpot, tmp);
             if (tmp >> 14) /* Ready to Program? */
                 return -EFAULT;
-            ctrl = DPOT_AD5270_2_3_FUSE;
+            ctrl = DPOT_AD5170_2_3_FUSE;
         }
         return dpot_write_r8d8(dpot, ctrl, value);
         break;
+    case DPOT_UID(AD5272_ID):
+    case DPOT_UID(AD5274_ID):
+        dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
+                DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+        if (reg & DPOT_ADDR_OTP)
+            return dpot_write_r8d8(dpot,
+                DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+
+        if (dpot->uid == DPOT_UID(AD5274_ID))
+            value = value << 2;
+
+        return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
+                       (value >> 8), value & 0xFF);
+        break;
     default:
         if (reg & DPOT_ADDR_CMD)
             return dpot_write_d8(dpot, reg);
@@ -320,7 +408,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
     }
 }
 
 static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
 {
     if (dpot->feat & F_SPI)
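
One detail of the AD5272/AD5274 read path above is the swab16(): the part
returns its 16-bit RDAC value high byte first, while the SMBus word transfer
underneath dpot_read_r8d16() assumes low byte first, so the halves arrive
swapped. A self-contained illustration:

    #include <linux/swab.h>
    #include <linux/types.h>

    /* Device sends 0x01 then 0x23 (high byte first); an SMBus word read
     * yields 0x2301, and swab16() restores the intended 0x0123. */
    static u16 fix_rdac_byteorder(u16 smbus_word)
    {
        return swab16(smbus_word);
    }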

Some files were not shown because too many files have changed in this diff.