Commit 6ef1f79f authored by Bryan Cazabonne's avatar Bryan Cazabonne
Browse files

Merge branch 'develop' into release-11.0

parents 0afde384 f0165a12
......@@ -35,6 +35,13 @@
to add several step handlers for the same orbit propagation, a new
event detector for angular separation as seen from the spacecraft.
See the list below for a full description of the changes.">
<action dev="bryan" type="update" issue="766" due-to="Gowtham Sivaraman">
Allowed setting of AttitudeProvider to the BoundedPropagator
generated via propagation.
</action>
<action dev="bryan" type="fix" issue="835">
Fixed format symbols for year, month, day in DateComponents#toString().
</action>
<action dev="thomas" type="fix" issue="668">
Added a new event detector for angular separation as seen from the spacecraft.
</action>
......
......@@ -54,9 +54,7 @@ import java.nio.file.Paths;
* Some implementations may fail if the {@link #getOpener() opener}'s
* {@link Opener#openStreamOnce() openStreamOnce} or {@link Opener#openReaderOnce() openReaderOnce}
* methods are called several times or are both called separately. This is particularly
* true in network-based streams.
* <p>
* This class is a simple container without any processing methods.
* true for network-based streams.
* </p>
* @see DataFilter
* @author Luc Maisonobe
......@@ -134,7 +132,7 @@ public class DataSource {
* stream, i.e. to delay this opening (or not open the stream at all).
* It is <em>not</em> intended to open the stream several times. Some
* implementations may fail if an attempt to open a stream several
* times is made. This is particularly true in network-based streams.
* times is made. This is particularly true for network-based streams.
* </p>
* @return opened stream
* @exception IOException if stream cannot be opened
......@@ -151,7 +149,7 @@ public class DataSource {
* stream, i.e. to delay this opening (or not open the stream at all).
* It is <em>not</em> intended to open the stream several times. Some
* implementations may fail if an attempt to open a stream several
* times is made. This is particularly true in network-based streams.
* times is made. This is particularly true for network-based streams.
* </p>
* @return opened stream
* @exception IOException if stream cannot be opened
......@@ -187,7 +185,7 @@ public class DataSource {
* the {@link #openReaderOnce() characters stream} separately (but opening
* the reader may be implemented by opening the binary stream or vice-versa).
* Implementations may fail if an attempt to open a stream several times is
* made. This is particularly true in network-based streams.
* made. This is particularly true for network-based streams.
* </p>
* @return opened stream or null if there are no data streams at all
* @exception IOException if stream cannot be opened
......@@ -203,7 +201,7 @@ public class DataSource {
* the {@link #openReaderOnce() characters stream} separately (but opening
* the reader may be implemented by opening the binary stream or vice-versa).
* Implementations may fail if an attempt to open a stream several times is
* made. This is particularly true in network-based streams.
* made. This is particularly true for network-based streams.
* </p>
* @return opened reader or null if there are no data streams at all
* @exception IOException if stream cannot be opened
......
......@@ -21,6 +21,7 @@ import java.util.Map;
import org.hipparchus.ode.DenseOutputModel;
import org.hipparchus.ode.ODEStateAndDerivative;
import org.orekit.attitudes.AttitudeProvider;
import org.orekit.errors.OrekitException;
import org.orekit.errors.OrekitMessages;
import org.orekit.frames.Frame;
......@@ -216,6 +217,17 @@ public class IntegratedEphemeris
throw new OrekitException(OrekitMessages.NON_RESETABLE_STATE);
}
/** {@inheritDoc} */
@Override
public void setAttitudeProvider(final AttitudeProvider attitudeProvider) {
    super.setAttitudeProvider(attitudeProvider);
    if (mapper == null) {
        // during construction the mapper has not been created yet,
        // so there is nothing more to update at this point
        return;
    }
    // a provider changed after construction must be propagated to the mapper
    // so that states rebuilt from the dense output use the new attitude law
    mapper.setAttitudeProvider(attitudeProvider);
}
/** {@inheritDoc} */
public SpacecraftState getInitialState() {
return updateAdditionalStates(basicPropagate(getMinDate()));
......
......@@ -39,15 +39,15 @@ public abstract class StateMapper {
/** Position angle type. */
private final PositionAngle angleType;
/** Attitude provider. */
private final AttitudeProvider attitudeProvider;
/** Central attraction coefficient. */
private final double mu;
/** Inertial frame. */
private final Frame frame;
/** Attitude provider. */
private AttitudeProvider attitudeProvider;
/** Simple constructor.
* <p>
* The position parameter type is meaningful only if {@link
......@@ -89,11 +89,6 @@ public abstract class StateMapper {
return orbitType;
}
/** Set position angle type.
*/
public void setPositionAngleType() {
}
/** Get propagation parameter type.
* @return angle type to use for propagation
*/
......@@ -122,6 +117,13 @@ public abstract class StateMapper {
return attitudeProvider;
}
/** Set the attitude provider.
 * @param provider attitude provider to use from now on
 */
public void setAttitudeProvider(final AttitudeProvider provider) {
    this.attitudeProvider = provider;
}
/** Map the raw double time offset to a date.
* @param t date offset
* @return date
......
......@@ -18,6 +18,8 @@ package org.orekit.time;
import java.io.Serializable;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
......@@ -119,11 +121,14 @@ public class DateComponents implements Serializable, Comparable<DateComponents>
/** Factory for non-leap years. */
private static final MonthDayFactory COMMON_YEAR_FACTORY = new CommonYearFactory();
/** Formatting symbols used in {@link #toString()}. */
private static final DecimalFormatSymbols US_SYMBOLS = new DecimalFormatSymbols(Locale.US);
/** Format for years. */
private static final DecimalFormat FOUR_DIGITS = new DecimalFormat("0000");
private static final DecimalFormat FOUR_DIGITS = new DecimalFormat("0000", US_SYMBOLS);
/** Format for months and days. */
private static final DecimalFormat TWO_DIGITS = new DecimalFormat("00");
private static final DecimalFormat TWO_DIGITS = new DecimalFormat("00", US_SYMBOLS);
/** Offset between J2000 epoch and modified julian day epoch. */
private static final int MJD_TO_J2000 = 51544;
......
# internal error, please notify development team by creating a new topic at {0}
INTERNAL_ERROR = <MISSING TRANSLATION>
INTERNAL_ERROR = eroare internă, vă rugăm să informați echipa de dezvoltare prin adăugarea unui tichet la {0}
# altitude ({0} m) is below the {1} m allowed threshold
ALTITUDE_BELOW_ALLOWED_THRESHOLD = altitudinea ({0} m) este sub limita permisă de {1} m
......@@ -68,7 +68,7 @@ NO_EARTH_ORIENTATION_PARAMETERS_LOADED = niciun Parametru de Orientare al Pămâ
MISSING_EARTH_ORIENTATION_PARAMETERS_BETWEEN_DATES = Parametrii de Orientare ai Pământului lipsesc între {0} și {1}
# missing Earth Orientation Parameters between {0} and {1}, gap is {2,number,0.0##############E0} s
MISSING_EARTH_ORIENTATION_PARAMETERS_BETWEEN_DATES_GAP = <MISSING TRANSLATION>
MISSING_EARTH_ORIENTATION_PARAMETERS_BETWEEN_DATES_GAP = Parametrii de Orientare ai Pământului lipsesc între {0} și {1}, decalajul este de {2,number,0.0##############E0} s
# missing Earth Orientation Parameters
NO_EARTH_ORIENTATION_PARAMETERS = Parametrii de Orientare ai Pământului lipsesc
......@@ -83,7 +83,7 @@ INCONSISTENT_DATES_IN_IERS_FILE = datele {1}-{2}-{3} și MJD {4} sunt inconsiste
UNEXPECTED_DATA_AFTER_LINE_IN_FILE = date neașteptate după linia {0} din fișierul {1}: {2}
# unexpected data at line {0} in file {1}
UNEXPECTED_DATA_AT_LINE_IN_FILE = <MISSING TRANSLATION>
UNEXPECTED_DATA_AT_LINE_IN_FILE = date neașteptate la linia {0} din fișierul {1}
# non-chronological dates in file {0}, line {1}
NON_CHRONOLOGICAL_DATES_IN_FILE = date necronologice în fișierul {0}, linia {1}
......@@ -107,7 +107,7 @@ UNEXPECTED_END_OF_FILE_AFTER_LINE = final neașteptat de fișier {0} (după lini
UNABLE_TO_PARSE_LINE_IN_FILE = imposibil de analizat linia {0} din fișierul {1}:\n{2}
# unable to parse element {0} at line {1}, file {2}
UNABLE_TO_PARSE_ELEMENT_IN_FILE = <MISSING TRANSLATION>
UNABLE_TO_PARSE_ELEMENT_IN_FILE = imposibil de analizat elementul {0} de la linia {1}, fișierul {2}
# unable to find file {0}
UNABLE_TO_FIND_FILE = imposibil de găsit fișierul {0}
......@@ -152,8 +152,7 @@ TOO_LARGE_DEGREE_FOR_GRAVITY_FIELD = grad prea mare (n = {0}, gradul maxim al po
TOO_LARGE_ORDER_FOR_GRAVITY_FIELD = ordin prea mare (n = {0}, ordinul maxim al potențialului este {1})
# several reference dates ({0} and {1} differ by {3,number,0.0##############E0} s) found in gravity field file {2}
# SEVERAL_REFERENCE_DATES_IN_GRAVITY_FIELD = mai multe date de referință ({0} and {1}) descoperite în fișierul de câmp gravitațional {2}
SEVERAL_REFERENCE_DATES_IN_GRAVITY_FIELD = <MISSING TRANSLATION>
SEVERAL_REFERENCE_DATES_IN_GRAVITY_FIELD = mai multe date de referință ({0} și {1} separate de {3,number,0.0##############E0} s) descoperite în fișierul de câmp gravitațional {2}
# no TLE data available for object {0}
NO_TLE_FOR_OBJECT = nu sunt disponibile date TLE pentru obiectul {0}
......@@ -237,8 +236,7 @@ FRAMES_MISMATCH = sistemul de referință {0} nu corespunde sistemului de referi
INITIAL_STATE_NOT_SPECIFIED_FOR_ORBIT_PROPAGATION = stare inițială nespecificată pentru propagarea orbitală
# target event date must be before {1} by {3,number,0.0##############E0} seconds or after {2} by {3,number,0.0##############E0} seconds, but target event date {0} is {4,number,0.0##############E0} seconds before {1} and {5,number,0.0##############E0} seconds after {2} so it cannot be added
# EVENT_DATE_TOO_CLOSE = data evenimentului {0}, mai mare decât {1} minus {3} secunde și mai mică decât {2} plus {3} secunde, nu poate fi adăugată
EVENT_DATE_TOO_CLOSE = <MISSING TRANSLATION>
EVENT_DATE_TOO_CLOSE = data evenimentului țintă trebuie să fie înainte de {1} cu {3,number,0.0##############E0} secunde sau după {2} cu {3,number,0.0##############E0} secunde, dar data evenimentului țintă {0} este cu {4,number,0.0##############E0} secunde înainte de {1} și {5,number,0.0##############E0} secunde după {2} deci nu poate fi adăugată
# unable to read header record from JPL ephemerides binary file {0}
UNABLE_TO_READ_JPL_HEADER = imposibil de citit antetul fișierului de efemeride JPL {0}
......@@ -268,10 +266,10 @@ OUT_OF_RANGE_BODY_EPHEMERIDES_DATE = dată în afara domeniului de valabilitate
OUT_OF_RANGE_EPHEMERIDES_DATE = dată în afara domeniului de valabilitate al efemeridelor : {0}, [{1}, {2}]
# out of range date for ephemerides: {0} is {3,number,0.0##############E0} s before [{1}, {2}]
OUT_OF_RANGE_EPHEMERIDES_DATE_BEFORE = <MISSING TRANSLATION>
OUT_OF_RANGE_EPHEMERIDES_DATE_BEFORE = dată în afara domeniului de valabilitate al efemeridelor : {0} este cu {3,number,0.0##############E0} s înainte de [{1}, {2}]
# out of range date for ephemerides: {0} is {3,number,0.0##############E0} s after [{1}, {2}]
OUT_OF_RANGE_EPHEMERIDES_DATE_AFTER = <MISSING TRANSLATION>
OUT_OF_RANGE_EPHEMERIDES_DATE_AFTER = dată în afara domeniului de valabilitate al efemeridelor : {0} este cu {3,number,0.0##############E0} s după [{1}, {2}]
# unexpected two elevation values: {0} and {1}, for one azimuth: {2}
UNEXPECTED_TWO_ELEVATION_VALUES_FOR_ONE_AZIMUTH = date neașteptate, două valori ale elevației: {0} and {1}, pentru o singură valoare a azimutului: {2}
......@@ -322,10 +320,10 @@ CCSDS_DATE_INVALID_LENGTH_TIME_FIELD = lungime nevalidă pentru câmpul timp din
CCSDS_DATE_MISSING_AGENCY_EPOCH = epocă de referință specifică agenției lipsă din data CCSDS
# missing mandatory key {0} in CCSDS file {1}
CCSDS_MISSING_KEYWORD = <MISSING TRANSLATION>
CCSDS_MISSING_KEYWORD = cheia esențială {0} lipsește din fișierul CCSDS {1}
# key {0} is not allowed in format version {1}
CCSDS_KEYWORD_NOT_ALLOWED_IN_VERSION = <MISSING TRANSLATION>
CCSDS_KEYWORD_NOT_ALLOWED_IN_VERSION = cheia {0} nu este permisă în versiunea {1}
# unexpected keyword in CCSDS line number {0} of file {1}:\n{2}
CCSDS_UNEXPECTED_KEYWORD = cuvânt cheie neașteptat la linia {0} din fișierul CCSDS {1}:\n{2}
......@@ -349,10 +347,10 @@ CCSDS_INCONSISTENT_TIME_SYSTEMS = sisteme de timp incoerente : {0} ≠ {1}
CCSDS_TDM_KEYWORD_NOT_FOUND = Niciun cuvânt cheie CCSDS TDM nu a fost găsit la linia {0} din fișierul {1}:\n{2}
# no Range Units converter configured for parsing Tracking Data Message
CCSDS_TDM_MISSING_RANGE_UNITS_CONVERTER = <MISSING TRANSLATION>
CCSDS_TDM_MISSING_RANGE_UNITS_CONVERTER = niciun convertor de unitate pseudo-distanță configurat pentru citirea „Tracking Data Message”
# Time system should have already been set before line {0} of file {1}
CCSDS_TIME_SYSTEM_NOT_READ_YET = <MISSING TRANSLATION>
CCSDS_TIME_SYSTEM_NOT_READ_YET = Sistemul de timp ar fi trebuit deja configurat înaintea liniei {0} din fișierul {1}
# name "{0}" is already used for an additional state
ADDITIONAL_STATE_NAME_ALREADY_IN_USE = numele "{0}" este deja folosit pentru o stare adițională
......@@ -373,19 +371,19 @@ DSST_SPR_SHADOW_INCONSISTENT = inconsistență în calculul umbrei: intrare = {0
DSST_ECC_NO_NUMERICAL_AVERAGING_METHOD = Orbita curentă are o eccentricitate {0} > 0.5 care necesită pentru calculul ratelor medii o metodă numerică cu dependență de timp neimplementată încă
# unsupported sp3 file version {0}
SP3_UNSUPPORTED_VERSION = versiunea fișierului sp3 {0} nesuportată
SP3_UNSUPPORTED_VERSION = versiune neacceptată a fișierului sp3 {0}
# found {0} epochs in file {1}, expected {2}
SP3_NUMBER_OF_EPOCH_MISMATCH = găsite {0} date de referință în fișierul {1}, așteptate {2}
SP3_NUMBER_OF_EPOCH_MISMATCH = au fost găsite {0} date de referință în fișierul {1}, așteptate {2}
# unexpected end of file in sp3 file (after line {0})
SP3_UNEXPECTED_END_OF_FILE = sfârșit neașteptat al fișierului sp3 (după linia {0})
# unsupported clock file version {0}
CLOCK_FILE_UNSUPPORTED_VERSION = versiunea fișierului de ceas {0} nesuportată
CLOCK_FILE_UNSUPPORTED_VERSION = versiune neacceptată a fișierului de ceas {0}
# unsupported navigation messages file version {0}
NAVIGATION_FILE_UNSUPPORTED_VERSION = <MISSING TRANSLATION>
NAVIGATION_FILE_UNSUPPORTED_VERSION = versiune neacceptată a fișierului cu mesaje de navigație {0}
# non-existent geomagnetic model {0} for year {1}
NON_EXISTENT_GEOMAGNETIC_MODEL = fișierul de model geomagnetic {0} inexistent pentru anul {1}
......@@ -406,19 +404,16 @@ NOT_ENOUGH_CACHED_NEIGHBORS = numărul de vecini din cache este prea mic: {0} (
NO_CACHED_ENTRIES = nu există înregistrări în cache
# generated entries not sorted: {0} > {1} by {2,number,0.0##############E0} s
# NON_CHRONOLOGICALLY_SORTED_ENTRIES = înregistrările generate nu sunt în ordine cronologică: {0} > {1}
NON_CHRONOLOGICALLY_SORTED_ENTRIES = <MISSING TRANSLATION>
NON_CHRONOLOGICALLY_SORTED_ENTRIES = înregistrările generate nu sunt în ordine cronologică: {0} > {1} cu {2,number,0.0##############E0} s
# no data generated around date: {0}
NO_DATA_GENERATED = nu există informații generate în jurul datei: {0}
# unable to generate new data before {0}, but data is requested for {1} which is {2,number,0.0##############E0} s before
# UNABLE_TO_GENERATE_NEW_DATA_BEFORE = imposibil de generat noi date înainte de {0}, date solicitate pentru {1}
UNABLE_TO_GENERATE_NEW_DATA_BEFORE = <MISSING TRANSLATION>
UNABLE_TO_GENERATE_NEW_DATA_BEFORE = imposibil de generat noi date înainte de {0}, dar există date solicitate pentru {1} care este anterior cu {2,number,0.0##############E0} s
# unable to generate new data after {0}, but data is requested for {1} which is {2,number,0.0##############E0} s after
# UNABLE_TO_GENERATE_NEW_DATA_AFTER = imposibil de generat noi date după {0}, date solicitate pentru {1}
UNABLE_TO_GENERATE_NEW_DATA_AFTER = <MISSING TRANSLATION>
UNABLE_TO_GENERATE_NEW_DATA_AFTER = imposibil de generat noi date după {0}, dar există date solicitate pentru {1} care este ulterior cu {2,number,0.0##############E0} s
# unable to compute hyperbolic eccentric anomaly from the mean anomaly after {0} iterations
UNABLE_TO_COMPUTE_HYPERBOLIC_ECCENTRIC_ANOMALY = imposibil de calculat anomalia excentrică hiperbolică pornind de la anomalia medie după {0} iterații
......@@ -475,22 +470,22 @@ CCSDS_AEM_INCONSISTENT_TIME_SYSTEMS = sisteme de timp inconsistente în blocuril
CCSDS_AEM_ATTITUDE_TYPE_NOT_IMPLEMENTED = tipul de atitudine {0} din fișierele CCSDS AEM nu este implementat în Orekit
# invalid rotation sequence {0} at line {1} of file {2}
CCSDS_INVALID_ROTATION_SEQUENCE = <MISSING TRANSLATION>
CCSDS_INVALID_ROTATION_SEQUENCE = secvență de rotație {0} invalidă la linia {1} din fișierul {2}
# element set type {0} ({1}) is not supported yet
CCSDS_UNSUPPORTED_ELEMENT_SET_TYPE = <MISSING TRANSLATION>
CCSDS_UNSUPPORTED_ELEMENT_SET_TYPE = tipul de set de elemente {0} ({1}) nu este încă permis
# retrograde factor not supported in element set {0}
CCSDS_UNSUPPORTED_RETROGRADE_EQUINOCTIAL = <MISSING TRANSLATION>
CCSDS_UNSUPPORTED_RETROGRADE_EQUINOCTIAL = factorul retrograd nu este permis în setul de elemente {0}
# element set type {0} ({1}) expects {2} elements
CCSDS_ELEMENT_SET_WRONG_NB_COMPONENTS = <MISSING TRANSLATION>
CCSDS_ELEMENT_SET_WRONG_NB_COMPONENTS = tipul de set de elemente {0} ({1}) așteaptă {2} elemente
# wrong number of units for maneuver {0}
CCSDS_MANEUVER_UNITS_WRONG_NB_COMPONENTS = <MISSING TRANSLATION>
CCSDS_MANEUVER_UNITS_WRONG_NB_COMPONENTS = număr greșit al unităților pentru manevra {0}
# missing time field for maneuver {0}
CCSDS_MANEUVER_MISSING_TIME = <MISSING TRANSLATION>
CCSDS_MANEUVER_MISSING_TIME = lipsește câmpul de timp pentru manevra {0}
# Creating an aggregate propagator requires at least one constituent propagator, but none were provided.
NOT_ENOUGH_PROPAGATORS = Crearea unui propagator combinator necesită cel puțin un propagator, dar nici unul nu a fost definit.
......@@ -636,15 +631,13 @@ INVALID_MEASUREMENT_TYPES_FOR_COMBINATION_OF_MEASUREMENTS = tipurile de măsuri
INCOMPATIBLE_FREQUENCIES_FOR_COMBINATION_OF_MEASUREMENTS = frecvențele {0} și {1} sunt incompatibile pentru combinația {2}
# observations are not in chronological order: {0} is {2,number,0.0##############E0} s after {1}
# NON_CHRONOLOGICAL_DATES_FOR_OBSERVATIONS = observațiile de la datele {0} și {1} nu sunt în ordine cronologică
NON_CHRONOLOGICAL_DATES_FOR_OBSERVATIONS = <MISSING TRANSLATION>
NON_CHRONOLOGICAL_DATES_FOR_OBSERVATIONS = observațiile nu sunt în ordine cronologică: {0} este cu {2,number,0.0##############E0} s după {1}
# Use of the ExceptionalDataContext detected. This is typically used to detect developer errors.
EXCEPTIONAL_DATA_CONTEXT = a fost detectată o utilizare a ExceptionalDataContext. Acest mecanism este utilizat în general pentru detectarea unei erori de dezvoltare
# Observations must have different dates: {0}, {1} ({3,number,0.0##############E0} s from first observation), and {2} ({4,number,0.0##############E0} s from first observation, {5,number,0.0##############E0} s from second observation)
# NON_DIFFERENT_DATES_FOR_OBSERVATIONS = observațiile {0}, {1} și {2} trebuie să aibă date diferite
NON_DIFFERENT_DATES_FOR_OBSERVATIONS = <MISSING TRANSLATION>
NON_DIFFERENT_DATES_FOR_OBSERVATIONS = Observațiile trebuie să aibă date diferite: {0}, {1} ({3,number,0.0##############E0} s de la prima observație), și {2} ({4,number,0.0##############E0} s de la prima observație, {5,number,0.0##############E0} s de la a doua observație)
# observations are not in the same plane
NON_COPLANAR_POINTS = observațiile nu sunt în același plan
......@@ -677,73 +670,73 @@ INVALID_RANGE_INDICATOR_IN_CRD_FILE = Indicatorul de interval {0} este nevalid
CRD_UNEXPECTED_END_OF_FILE = Sfârșit neașteptat al fișierului CRD (după linia {0})
# end of encoded message reached
END_OF_ENCODED_MESSAGE = <MISSING TRANSLATION>
END_OF_ENCODED_MESSAGE = sfârșitul mesajului codificat a fost atins
# too large data type ({0} bits)
TOO_LARGE_DATA_TYPE = <MISSING TRANSLATION>
TOO_LARGE_DATA_TYPE = tip de date prea mare ({0} biți)
# unknown encoded message number {0}
UNKNOWN_ENCODED_MESSAGE_NUMBER = <MISSING TRANSLATION>
UNKNOWN_ENCODED_MESSAGE_NUMBER = număr {0} necunoscut pentru mesajul codificat
# unknown authentication method: {0}
UNKNOWN_AUTHENTICATION_METHOD = <MISSING TRANSLATION>
UNKNOWN_AUTHENTICATION_METHOD = metodă de autentificare necunoscută: {0}
# unknown carrier phase code: {0}
UNKNOWN_CARRIER_PHASE_CODE = <MISSING TRANSLATION>
UNKNOWN_CARRIER_PHASE_CODE = cod de fază purtător necunoscut: {0}
# unknown data format: {0}
UNKNOWN_DATA_FORMAT = <MISSING TRANSLATION>
UNKNOWN_DATA_FORMAT = format al datelor necunoscut: {0}
# unknown navigation system: {0}
UNKNOWN_NAVIGATION_SYSTEM = <MISSING TRANSLATION>
UNKNOWN_NAVIGATION_SYSTEM = sistem de navigație necunoscut: {0}
# data stream {0} requires a NMEA fix data
STREAM_REQUIRES_NMEA_FIX = <MISSING TRANSLATION>
STREAM_REQUIRES_NMEA_FIX = fluxul de date {0} necesită o data NMEA fixă
# failed authentication for mountpoint {0}
FAILED_AUTHENTICATION = <MISSING TRANSLATION>
FAILED_AUTHENTICATION = autentificare eșuată pentru punctul de montare {0}
# error connecting to {0}: {1}
CONNECTION_ERROR = <MISSING TRANSLATION>
CONNECTION_ERROR = eroare de conectare la {0}: {1}
# unexpected content type {0}
UNEXPECTED_CONTENT_TYPE = <MISSING TRANSLATION>
UNEXPECTED_CONTENT_TYPE = tip de conținut {0} neașteptat
# cannot parse GNSS data from {0}
CANNOT_PARSE_GNSS_DATA = <MISSING TRANSLATION>
CANNOT_PARSE_GNSS_DATA = imposibil de analizat datele GNSS de la {0}
# unknown host {0}
UNKNOWN_HOST = <MISSING TRANSLATION>
UNKNOWN_HOST = mașina gazdă {0} este necunoscută
# error parsing sourcetable line {0} from {1}: {2}
SOURCETABLE_PARSE_ERROR = <MISSING TRANSLATION>
SOURCETABLE_PARSE_ERROR = eroare la citirea liniei {0} din tabelul sursă {1}: {2}
# cannot parse sourcetable from {0}
CANNOT_PARSE_SOURCETABLE = <MISSING TRANSLATION>
CANNOT_PARSE_SOURCETABLE = eroare la citirea tabelului sursă de la {0}
# mount point {0} is already connected
MOUNPOINT_ALREADY_CONNECTED = <MISSING TRANSLATION>
MOUNPOINT_ALREADY_CONNECTED = punctul de montare {0} este deja conectat
# missing header from {0}: {1}
MISSING_HEADER = <MISSING TRANSLATION>
MISSING_HEADER = lipsă antet de la {0}: {1}
# {0} is not a valid international designator
NOT_VALID_INTERNATIONAL_DESIGNATOR = <MISSING TRANSLATION>
NOT_VALID_INTERNATIONAL_DESIGNATOR = {0} nu este un identificator internațional valid
# value for key {0} has not been initialized
UNINITIALIZED_VALUE_FOR_KEY = <MISSING TRANSLATION>
UNINITIALIZED_VALUE_FOR_KEY = valoarea pentru cheia {0} nu a fost inițializată
# unknown unit {0}
UNKNOWN_UNIT = <MISSING TRANSLATION>
UNKNOWN_UNIT = unitatea {0} este necunoscută
# units {0} and {1} are not compatible
INCOMPATIBLE_UNITS = <MISSING TRANSLATION>
INCOMPATIBLE_UNITS = unitățile {0} și {1} sunt incompatibile
# missing velocity data
MISSING_VELOCITY = <MISSING TRANSLATION>
MISSING_VELOCITY = lipsesc informații referitoare la viteză
# attempt to generate file {0} with a formatting error
ATTEMPT_TO_GENERATE_MALFORMED_FILE = <MISSING TRANSLATION>
ATTEMPT_TO_GENERATE_MALFORMED_FILE = încercare de generare a fișierului {0} ce conține o eroare de formatare
# {0} failed to find root between {1} (g={2,number,0.0##############E0}) and {3} (g={4,number,0.0##############E0})\nLast iteration at {5} (g={6,number,0.0##############E0})
FIND_ROOT = <MISSING TRANSLATION>
FIND_ROOT = {0} nu a găsit rădăcina între {1} (g={2,number,0.0##############E0}) și {3} (g={4,number,0.0##############E0})\nUltima iterație la {5} (g={6,number,0.0##############E0})
......@@ -53,7 +53,7 @@ vector block, Keplerian elements block, maneuvers block in OPM), then
there is one dedicated class for each logical block.
The top-level message also contains some Orekit-specific data that are mandatory
for building some objects but is not present in the CCSDS messages. This
for building some objects but are not present in the CCSDS messages. This
includes for example IERS conventions, data context, and gravitational
coefficient for ODM as it is sometimes optional in these messages.
......@@ -63,7 +63,7 @@ but a flat structure was used.
This organization implies that users wishing to access raw internal entries must
walk through the hierarchy. For message types that allow only one segment, there
are shortcuts to use `message.getMetadata()` and `message.getData()` instead of
are shortcuts to use `message.getMetadata()` and `message.getData()` in addition to
`message.getSegments().get(0).getMetadata()` and `message.getSegments().get(0).getData()`
respectively. Where it is relevant, other shortcuts are provided to access
Orekit-compatible objects as shown in the following code snippet:
......@@ -76,7 +76,7 @@ Orekit-compatible objects as shown in the following code snippet:
AbsoluteDate orbitDate = opm.getSegments().get(0).get(Data).getStateVectorBlock().getEpoch();
Messages can be obtained by parsing an existing message or by using
the setters to create it from scratch, bottom up starting from the
the setters to create them from scratch, bottom up starting from the
raw elements and building up through logical blocks, data, metadata,
segments, header and finally message.
......@@ -84,12 +84,12 @@ segments, header and finally message.
Parsing a text message to build some kind of `Ndm` object is performed
by setting up a parser. Each message type has its own parser, but a single
`ParserBuilder` can build all of them. Once created, the parser `parseMessage`
`ParserBuilder` can build all parser types. Once created, the parser `parseMessage`
method is called with a data source. It will return the parsed message as a
hierarchical container as depicted in the previous section.
The Orekit-specific data that are mandatory for building some objects but are
not present in the CCSDS messages are set up when building the `ParserBuilder`.
not present in the CCSDS messages are set up beforehand when building the `ParserBuilder`.
This includes for example IERS conventions, data context, and gravitational
coefficient for ODM as it is sometimes optional in these messages.
......@@ -113,7 +113,7 @@ even that "listing of units via the [insert keyword here] keyword does
not override the mandatory units specified in the selected [insert type here]".
This would mean that `IGNORE_PARSE` should be used for compliance with the
standard and messages specifying wrong units should be accepted silently. Other
places set that the tables specify "the units to be used" and that "If units
places state that the tables specify "the units to be used" and that "If units
are displayed, they must exactly match the units (including lower/upper case)
as specified in tables". This would mean that `STRICT_COMPLIANCE` should be used
for compliance with the standard and messages specifying wrong units should be
......@@ -124,9 +124,9 @@ were really used for producing the message, we consider that `CONVERT_COMPATIBLE
is a good trade-off for leniency. The default setting is therefore to set the
`ParseBuilder` behavior to `CONVERT_COMPATIBLE`, but users can configure
their builder differently to suit their needs. The units parser used in
Orekit is also feature-rich and known how to handle units written with
Orekit is also feature-rich and knows how to handle units written with
human-friendly unicode characters, like for example km/s² or √km (whereas
CCSDS standard would use km/s**2 or km**0.5).
CCSDS standard would use km/s\*\*2 or km\*\*0.5).
One change introduced in Orekit 11.0 is that the progressive set up of
parsers using the fluent API (methods `withXxx()`) has been moved to the top-level
......@@ -137,7 +137,9 @@ to use parsers is then to set up one `ParserBuilder` and to call its `buildXymPa
methods from within each thread to dedicate one parser for each message and drop it
afterwards. In single-threaded cases, parsers used from within a loop can be reused
safely after the `parseMethod` has returned, but building a new parser from the
builder is simple.
builder is simple and has little overhead, so asking the existing `ParseBuilder` to
build a new parser for each message is still the recommended way in single-threaded
applications.
Parsers automatically recognize if the message is in Key-Value Notation (KVN) or in
eXtended Markup Language (XML) format and adapt accordingly. This is
......@@ -157,8 +159,8 @@ The `EphemerisFileParser` interface defines a `parse(dataSource)` method that
is similar to the CCSDS-specific `parseMessage(dataSource)` method.
As the parsers are parameterized with the type of the parsed message, the `parseMessage`
and `parse` methods in all parsers already have the specific type. There is no need
to cast the returned value as in pre-11.0 versions of Orekit.
and `parse` methods in all parsers already return an object with the proper specific message
type. There is no need to cast the returned value as was done in pre-11.0 versions of Orekit.
The following code snippet shows how to parse an OEM, in this case using a file
name to create the data source, and using the default values for the parser builder:
......@@ -172,10 +174,10 @@ type and using a low level generator corresponding to the desired message format
`KvnGenerator` for Key-Value Notation or `XmlGenerator` for eXtended Markup Language.
All CCSDS messages have a corresponding writer that implements the CCSDS-specific
`MessageWriter` interface. This interface allows to writer either an already built
`MessageWriter` interface. This interface allows to write either an already built
message, or separately the header first and then looping to write the segments.
Ephemeris-type messages (AEM, OEM and OCM) implement the generic ephemeris writer
Ephemeris-type messages (AEM, OEM and OCM) also implement the generic ephemeris writer
interfaces (`AttitudeEphemerisFileWriter` and `EphemerisFileWriter`) in addition
to the CCSDS-specific interface, so they can be used in a more general
way when ephemerides data is built from non-CCSDS data. The generic `write` methods
......@@ -184,12 +186,12 @@ in these interfaces take as arguments objects that implement the generic
interfaces. As these interfaces do not provide access to header and metadata informations
that CCSDS writers need, these informations must be provided beforehand to the
writers. This is done by providing directly the header and a metadata template in
the constructor of the writer. Of course, non-CCSDS writers would use different
strategies to get their specific metadata. The metadata provided is only a template that
is incomplete: the frame, start time and stop time will be filled later on when
the data to be written is available, as they will change for each segment. The
argument used as the template is not modified when building a writer, its content
is copied in an internal object that is modified by adding the proper frame and
the constructor of the writer. Of course, writers for non-CCSDS message formats would use
different strategies to get their specific metadata. In the CCSDS case, the metadata
provided is only a template that is incomplete: the frame, start time and stop time will
be filled later on when the data to be written is available, as they will change for each
segment. The argument used as the template is not modified when building a writer, its
content is copied in an internal object that is modified by adding the proper frame and
time data when each segment is created.
Ephemeris-type messages can also be used in a streaming way (with specific
......@@ -198,9 +200,11 @@ on-the-fly by a propagator. These specific writers provide a `newSegment()` meth
returns a fixed step handler to register to the propagator. If ephemerides must be split
into different segments, in order to prevent interpolation between two time ranges
separated by a discrete event like a maneuver, then a new step handler must be retrieved
using the `newSegment()` method at discrete event time and a new propagator must be used.
using the `newSegment()` method at discrete event time and a new propagator must be used
(or `propagator.getMultiplexer().remove(oldSegmentHandler)` and
`propagator.getMultiplexer().add(newSegmentHandler)` must be called appropriately).
All segments will be gathered properly in the generated CCSDS message. Using the same
propagator and same event handler would not work as expected. The propagator would run
propagator and same event handler would not work as expected: the propagator would run
just fine through the discrete event that would reset the state, but the ephemeris would
not be aware of the change and would just continue the same segment. Upon reading the
message produced this way, the reader would not be aware that interpolation should not be
......@@ -232,7 +236,7 @@ start of the XML declaration ("<?xml ...>") is found, then `XmlLexicalAnalyzer`
selected, otherwise `KvnLexicalAnalyzer` is selected. Detection works for UCS-4,
UTF-16 and UTF-8 encodings, with or without a Byte Order Mark, and regardless of
endianness. This XML declaration is optional in general-purpose XML documents
(at least for XML 1.0) but CCSDS messages and XML 1.1 spec both require it to be
(at least for XML 1.0) but CCSDS messages and XML 1.1 specification both require it to be
present. After the first few bytes allowing selection have been read, the characters
stream is reset to beginning so the selected lexical analyzer will see these
characters again. This works even if the `DataSource` is a network stream, thanks to
......@@ -250,7 +254,7 @@ The dynamic view of lexical analysis is depicted in the following sequence diagr
![general parsing sequence diagram diagram](../images/design/ccsds-lexical-analysis-sequence-diagram.png)
The second level of parsing is message parsing is semantic analysis. Its aim is
The second level of parsing in message parsing is semantic analysis. Its aim is
to read the stream of `ParseToken` objects and to progressively build the CCSDS message
from them. Semantic analysis of primitive entries like `EPOCH_TZERO = 1998-12-18T14:28:15.1172`
in KVN or `<EPOCH_TZERO>1998-12-18T14:28:15.1172</EPOCH_TZERO>` in XML is independent
......@@ -270,7 +274,7 @@ manage (i.e. a lot of different names a `ParseToken` can have). Prior to version
used a single big enumerate class for all these keys, but it proved unmanageable as more
message types were supported. The framework set up with version 11.0 is based on the fact
these numerous keys belong to a smaller set of logical blocks that are always parsed as a
whole (header, metadata, state vector, covariance...). Parsing can be performed with the
whole (header, metadata, state vector, covariance...). Parsing is therefore performed with the
parser switching between a small number of well-known states. When one state is active,
say metadata parsing, then lookup is limited to the keys allowed in metadata. If an
unknown token arrives, then the parser assumes the current section is finished, and
......@@ -311,7 +315,7 @@ upcoming tokens one after the other. Each processing state may adopt a different
strategy for this, depending on the section it handles. Processing states are
always quite small. Some processing states that can be reused from message type
to message type (like `HeaderProcessingState`, `KvnStructureProcessingState` or
`XmlStructureProcessingstate`) are implemented as separate classes. Other processing
`XmlStructureProcessingState`) are implemented as separate classes. Other processing
states that are specific to one message type (and hence to one parser), are
implemented as a single private method within the parser. Method references
are used to point directly to these methods. This allows one parser class to
......@@ -319,7 +323,7 @@ provide simultaneously several implementations of the `ProcessingState` interfac
The following example is extracted from the `TdmParser`, it shows that when a
`DATA_START` key is seen in a KVN message or when a `<data>` start element is
seen in an XML message, then `prepareData` is called and an `ObservationsBlock`
is allocated to hold the upcoming observations, and the fallback processing
is allocated to hold the upcoming observations. Then the fallback processing
state is set to the private method `processDataToken` so that the next token,
which at this stage is expected to be a data token representing an observation,
can be processed properly:
......@@ -330,7 +334,7 @@ can be processed properly:
return true;
}
In many cases, the keys that are allowed in a section are fixed so they are defined
In most cases, the keys that are allowed in a section are fixed so they are defined
in an enumerate. The processing state (in this case often a private method within
the parser) then simply selects the constant corresponding to the token name using
the standard `valueOf` method from the enumerate class and delegates to it the processing
......@@ -341,7 +345,8 @@ section and add their own keys, several enumerate types can be checked in row. A
example of this design is the `processMetadataToken` method in `OemParser`, which is a single
private method acting as a `ProcessingState` and tries the enumerates `MetadataKey`,
`OdmMetadataKey`, `CommonMetadataKey` and finally `OemMetadataKey` to fill up the metadata
section.
section. There are a few cases when this design using an enumerate does not work, for