interim commit

This commit is contained in:
Nick Foster 2011-11-17 14:58:19 -08:00
parent 107dc1b969
commit 7a4f703e2b
6 changed files with 92 additions and 40 deletions

View File

@ -78,7 +78,7 @@ static double correlate_preamble(const float *in, int samples_per_chip) {
return corr;
}
//todo: make it return a pair of some kind, otherwise you can lose precision
static double pmt_to_timestamp(pmt::pmt_t tstamp, uint64_t abs_sample_cnt, double secs_per_sample) {
uint64_t ts_sample, last_whole_stamp;
double last_frac_stamp;

View File

@ -2,9 +2,10 @@
from modes_parse import modes_parse
import mlat
import numpy
import sys
sffile = open("27augsf3.txt")
rudifile = open("27augrudi3.txt")
#sffile = open("27augsf3.txt")
#rudifile = open("27augrudi3.txt")
#sfoutfile = open("sfout.txt", "w")
#rudioutfile = open("rudiout.txt", "w")
@ -13,23 +14,26 @@ sfparse = modes_parse([37.762236,-122.442525])
sf_station = [37.762236,-122.442525, 100]
mv_station = [37.409348,-122.07732, 100]
bk_station = [37.854246, -122.266701, 100]
raw_stamps = []
#first iterate through both files to find the estimated time difference. doesn't have to be accurate to more than 1ms or so.
#to do this, look for type 17 position packets with the same data. assume they're unique. print the tdiff.
#let's do this right for once
#collect a list of raw timestamps for each aircraft from each station
#the raw stamps have to be processed into corrected stamps OR distance has to be included in each
#then postprocess to find clock delay for each and determine drift rate for each aircraft separately
#then come up with an average clock drift rate
#then find an average drift-corrected clock delay
#then find rms error
#ok so get [ICAO, [raw stamps], [distance]] for each matched record
files = [sffile, rudifile]
stations = [sf_station, mv_station]
files = [open(arg) for arg in sys.argv[1:]]
#files = [sffile, rudifile]
stations = [sf_station, mv_station]#, bk_station]
records = []
@ -62,6 +66,8 @@ for station0_report in records[0]: #iterate over list of reports from station 0
if len(stamps) == len(records): #found same report in all records
all_heard.append({"data": station0_report["data"], "times": stamps})
#print all_heard
#ok, now let's pull out the location-bearing packets so we can find our time offset
position_reports = [x for x in all_heard if x["data"]["msgtype"] == 17 and 9 <= (x["data"]["longdata"] >> 51) & 0x1F <= 18]
offset_list = []
@ -76,17 +82,19 @@ for msg in position_reports:
range_to_ac = numpy.linalg.norm(numpy.array(mlat.llh2ecef(station))-numpy.array(mlat.llh2ecef(ac_pos)))
timestamp_at_ac = time - range_to_ac / mlat.c
rel_times.append(timestamp_at_ac)
offset_list.append({"aircraft": data["shortdata"], "times": rel_times})
offset_list.append({"aircraft": data["shortdata"] & 0xffffff, "times": rel_times})
#this is a list of unique aircraft, heard by all stations, which transmitted position packets
#we do drift calcs separately for each aircraft in the set because mixing them seems to screw things up
#i haven't really sat down and figured out why that is yet
unique_aircraft = list(set([x["aircraft"] for x in offset_list]))
print "Aircraft heard for clock drift estimate: %s" % [str("%x" % ac) for ac in unique_aircraft]
print "Total reports used: %d over %.2f seconds" % (len(position_reports), position_reports[-1]["times"][0]-position_reports[0]["times"][0])
#get a list of reported times gathered by the unique aircraft that transmitted them
#abs_unique_times = [report["times"] for ac in unique_aircraft for report in offset_list if report["aircraft"] == ac]
#print abs_unique_times
#todo: the below can be done cleaner with nested list comprehensions
#todo: the below can probably be done cleaner with nested list comprehensions
clock_rate_corrections = [0]
for i in range(1,len(stations)):
drift_error_limited = []
@ -107,8 +115,18 @@ for i in range(1,len(stations)):
for i in range(len(clock_rate_corrections)):
print "drift from %d relative to station 0: %.3fppm" % (i, clock_rate_corrections[i] * 1e6)
#ok now we have clock rate corrections for each station, and we can multilaterate like hell
#we'll need a way to get a clock offset at a particular time, in case the dataset has a discontinuity
#actually we don't, unless we still have the old "timestamps-are-wrong-in-b100" issue
#remember to subtract out rel_time before correcting for clock rate error
#start with all_heard
#let's get the average clock offset (based on drift-corrected, TDOA-corrected derived timestamps)
clock_offsets = [[numpy.mean([x["times"][i]*(1+clock_rate_corrections[i])-x["times"][0] for x in offset_list])][0] for i in range(0,len(stations))]
for i in range(len(clock_offsets)):
print "mean offset from %d relative to station 0: %.3f seconds" % (i, clock_offsets[i])
#for the two-station case, let's now go back, armed with our clock drift and offset, and get the variance between expected and observed timestamps
error_list = []
for i in range(1,len(stations)):
for report in offset_list:
error = abs(((report["times"][i]*(1+clock_rate_corrections[i]) - report["times"][0]) - clock_offsets[i]) * mlat.c)
error_list.append(error)
#print error
rms_error = (numpy.mean([error**2 for error in error_list]))**0.5
print "RMS error in TDOA: %.1f meters" % rms_error

View File

@ -0,0 +1,40 @@
#!/usr/bin/env python
# Two-station TDOA consistency check: take two hand-copied position reports
# (pasted below) heard by both the SF and MV stations, subtract each
# station's time-of-flight to the aircraft to estimate transmission time,
# and compare the resulting SF-vs-MV clock offsets.  With stable clocks the
# two offsets (dt1, dt2) would agree; the residual scaled by the propagation
# speed is the ranging error in meters.
# NOTE(review): this hunk shows 29 of 40 added lines — the file may continue
# past what is visible here.
import numpy
import mlat
#rudi says:
#17 8da12615 903bf4bd3eb2c0 36ac95 000000 0.0007421782357 2.54791875
#17 8d4b190a 682de4acf8c177 5b8f55 000000 0.0005142348236 2.81227225
#sf says:
#17 8da12615 903bf4bd3eb2c0 36ac95 000000 0.003357535461 00.1817445
#17 8d4b190a 682de4acf8c177 5b8f55 000000 0.002822938375 000.446215
# Station locations as [lat_deg, lon_deg, alt] — alt presumably meters;
# TODO confirm units against mlat.llh2ecef.
sf_station = [37.762236,-122.442525, 100]
mv_station = [37.409348,-122.07732, 100]
# Decoded aircraft positions ([lat, lon, alt]) and the raw receive
# timestamps (seconds) recorded at each station, from the reports above.
report1_location = [37.737804, -122.485139, 3345]
report1_sf_tstamp = 0.1817445
report1_mv_tstamp = 2.54791875
report2_location = [37.640836, -122.260218, 2484]
report2_sf_tstamp = 0.446215
report2_mv_tstamp = 2.81227225
# Time of flight from each station to report 1: straight-line distance
# between the ECEF-converted positions, divided by mlat.c (presumably the
# speed of light in m/s — confirm in mlat module).
report1_tof_sf = numpy.linalg.norm(numpy.array(mlat.llh2ecef(sf_station))-numpy.array(mlat.llh2ecef(report1_location))) / mlat.c
report1_tof_mv = numpy.linalg.norm(numpy.array(mlat.llh2ecef(mv_station))-numpy.array(mlat.llh2ecef(report1_location))) / mlat.c
# Estimated transmission times: receive timestamp minus time of flight.
report1_sf_tstamp_abs = report1_sf_tstamp - report1_tof_sf
report1_mv_tstamp_abs = report1_mv_tstamp - report1_tof_mv
# Same reduction for report 2.
report2_tof_sf = numpy.linalg.norm(numpy.array(mlat.llh2ecef(sf_station))-numpy.array(mlat.llh2ecef(report2_location))) / mlat.c
report2_tof_mv = numpy.linalg.norm(numpy.array(mlat.llh2ecef(mv_station))-numpy.array(mlat.llh2ecef(report2_location))) / mlat.c
report2_sf_tstamp_abs = report2_sf_tstamp - report2_tof_sf
report2_mv_tstamp_abs = report2_mv_tstamp - report2_tof_mv
# Apparent SF-minus-MV clock offset implied by each report independently.
dt1 = report1_sf_tstamp_abs - report1_mv_tstamp_abs
dt2 = report2_sf_tstamp_abs - report2_mv_tstamp_abs
# Disagreement between the two offsets, converted to meters.
# (Python 2 print statement, consistent with the rest of this commit.)
error = abs((dt1-dt2) * mlat.c)
print error

View File

@ -18,7 +18,6 @@ print teststamps
replies = []
for i in range(0, len(teststations)):
replies.append((teststations[i], teststamps[i]))
ans = mlat.mlat(replies, testalt)
error = numpy.linalg.norm(numpy.array(mlat.llh2ecef(ans))-numpy.array(testplane))
range = numpy.linalg.norm(mlat.llh2geoid(ans)-numpy.array(testme))

View File

@ -100,18 +100,13 @@ def mlat_iter(rel_stations, prange_obs, xguess = [0,0,0], limit = 20, maxrounds
xerr = [1e9, 1e9, 1e9]
rounds = 0
while numpy.linalg.norm(xerr) > limit:
prange_est = []
for station in rel_stations:
prange_est.append([numpy.linalg.norm(station - xguess)])
prange_est = [[numpy.linalg.norm(station - xguess)] for station in rel_stations]
dphat = prange_obs - prange_est
H = []
for row in range(0,len(rel_stations)):
H.append((numpy.array(-rel_stations[row,:])+xguess) / prange_est[row])
H = numpy.array(H)
H = numpy.array([(numpy.array(-rel_stations[row,:])+xguess) / prange_est[row] for row in range(0,len(rel_stations))])
#now we have H, the Jacobian, and can solve for residual error
xerr = numpy.linalg.lstsq(H, dphat)[0].flatten()
xguess += xerr
print xguess, xerr
#print xguess, xerr
rounds += 1
if rounds > maxrounds:
raise Exception("Failed to converge!")
@ -127,30 +122,20 @@ def mlat_iter(rel_stations, prange_obs, xguess = [0,0,0], limit = 20, maxrounds
def mlat(replies, altitude):
sorted_replies = sorted(replies, key=lambda time: time[1])
stations = []
timestamps = []
#i really couldn't figure out how to unpack this, sorry
for sorted_reply in sorted_replies:
stations.append(sorted_reply[0])
timestamps.append(sorted_reply[1])
stations = [sorted_reply[0] for sorted_reply in sorted_replies]
timestamps = [sorted_reply[1] for sorted_reply in sorted_replies]
me_llh = stations[0]
me = llh2geoid(stations[0])
rel_stations = [] #list of stations in XYZ relative to me
for station in stations[1:]:
rel_stations.append(numpy.array(llh2geoid(station)) - numpy.array(me))
rel_stations.append([0,0,0]-numpy.array(me)) #arne saknussemm, reporting in
#list of stations in XYZ relative to me
rel_stations = [numpy.array(llh2geoid(station)) - numpy.array(me) for station in stations[1:]]
rel_stations.append([0,0,0] - numpy.array(me))
rel_stations = numpy.array(rel_stations) #convert list of arrays to 2d array
#differentiate the timestamps to get TDOA
tdoa = []
for stamp in timestamps[1:]:
tdoa.append(stamp - timestamps[0])
prange_obs = []
for stamp in tdoa:
prange_obs.append([c * stamp])
#differentiate the timestamps to get TDOA, multiply by c to get pseudorange
prange_obs = [[c*(stamp-timestamps[0])] for stamp in timestamps[1:]]
#so here we calc the estimated pseudorange to the center of the earth, using station[0] as a reference point for the geoid
#in other words, we say "if the aircraft were directly overhead of station[0], this is the prange to the center of the earth"
@ -161,6 +146,7 @@ def mlat(replies, altitude):
#xguess = llh2ecef([37.617175,-122.400843, 8000])-numpy.array(me)
#xguess = [0,0,0]
#start our guess directly overhead, who cares
xguess = numpy.array(llh2ecef([me_llh[0], me_llh[1], altitude])) - numpy.array(me)
xyzpos = mlat_iter(rel_stations, prange_obs, xguess)
@ -171,6 +157,7 @@ def mlat(replies, altitude):
#nearest station, results in significant error due to the oblateness of the Earth's geometry.
#so now we solve AGAIN, but this time with the corrected pseudorange of the aircraft altitude
#this might not be really useful in practice but the sim shows >50m errors without it
#and <4cm errors with it, not that we'll get that close in reality but hey let's do it right
prange_obs[-1] = [numpy.linalg.norm(llh2ecef((llhpos[0], llhpos[1], altitude)))]
xyzpos_corr = mlat_iter(rel_stations, prange_obs, xyzpos) #start off with a really close guess
llhpos = ecef2llh(xyzpos_corr+me)

8
src/python/testparse.py Normal file
View File

@ -0,0 +1,8 @@
#!/usr/bin/env python
# Parser smoke test: replay a captured Mode S log line-by-line through the
# pretty-printer.  NOTE(review): the hunk header says 8 lines were added but
# only 6 are visible here; trailing lines (probably blanks) are not shown,
# and the diff renderer has stripped leading indentation from the loop body.
import modes_print
# Hard-coded capture from the 27 Aug recording session; the file handle is
# never closed explicitly — acceptable for a throwaway test script.
infile = open("27augrudi3.txt")
# Reference position [lat, lon] passed to the printer — presumably the
# receiver location; TODO confirm against modes_output_print's constructor.
printer = modes_print.modes_output_print([37.409348,-122.07732])
for line in infile:
printer.parse(line)